repo_name (string, lengths 6-112) | path (string, lengths 4-204) | copies (string, lengths 1-3) | size (string, lengths 4-6) | content (string, lengths 714-810k) | license (string, 15 classes) |
---|---|---|---|---|---|
caseyjlaw/tpipe | leanpipedt.py | 1 | 51791 | ##########################################
# functional style, uses multiprocessing #
# this version threads within processing #
##########################################
import numpy as n
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import applytelcal, applycals2
from scipy import signal
from math import ceil
import multiprocessing as mp
import string, os, ctypes, types
import cPickle as pickle
import time as timestamp
import cProfile  # used by the readloop2/processloop2 profiling wrappers below
import leanpipedt_cython as lib
#import leanpipe_external as lib
import qimg_cython as qimg
# Optional imports that have extra dependencies
# can choose fft from numpy or pyfftw.
from numpy import fft as fft
#import pyfftw.interfaces.numpy_fft as fft
#
# optionally can use CASA outside of casapy session. requires hacking CASA shared-object libraries...
#import casautil
#ms = casautil.tools.ms()
#qa = casautil.tools.quanta()
def numpyview(arr, datatype, shape):
""" Takes mp.Array and returns numpy array with shape of data in MS.
"""
# return n.frombuffer(arr.get_obj()).view(n.dtype(datatype)).reshape((iterint, nbl, nchan, npol))
return n.frombuffer(arr.get_obj(), dtype=n.dtype(datatype)).view(n.dtype(datatype)).reshape(shape)
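# Usage sketch (mirrors how the shared buffers are built in pipe_thread and
# readloop): a ctypes float buffer with twice the element count holds the
# real/imag parts of a complex64 array.
#   data_mem = mp.Array(ctypes.c_float, iterint*nbl*nchan*npol*2)  # x2 for complex
#   data = numpyview(data_mem, 'complex64', (iterint, nbl, nchan, npol))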
def calc_hexcenters(fwhmsurvey, fwhmfield, show=0):
""" Tile a large circular area with a small circular areas. sizes are assumed to be fwhm. assumes flat sky.
"""
large = fwhmsurvey
small = fwhmfield
centers = []
(l0,m0) = (0.,0.)
centers.append((l0,m0))
l1 = l0-(small/2.)*n.cos(n.radians(60))
m1 = m0-(small/2.)*n.sin(n.radians(60))
ii = 0
while ( n.sqrt((l1-l0)**2+(m1-m0)**2) < large/2.):
l1 = l1+((-1)**ii)*(small/2.)*n.cos(n.radians(60))
m1 = m1+(small/2.)*n.sin(n.radians(60))
l2 = l1+small/2
m2 = m1
while ( n.sqrt((l2-l0)**2+(m2-m0)**2) < large/2.):
centers.append((l2,m2))
l2 = l2+small/2
l2 = l1-small/2
m2 = m1
while ( n.sqrt((l2-l0)**2+(m2-m0)**2) < large/2.):
centers.append((l2,m2))
l2 = l2-small/2
ii = ii+1
l1 = l0
m1 = m0
ii = 0
while ( n.sqrt((l1-l0)**2+(m1-m0)**2) < large/2.):
l1 = l1-((-1)**ii)*(small/2.)*n.cos(n.radians(60))
m1 = m1-(small/2.)*n.sin(n.radians(60))
l2 = l1
m2 = m1
while ( n.sqrt((l2-l0)**2+(m2-m0)**2) < large/2.):
centers.append((l2,m2))
l2 = l2+small/2
l2 = l1-small/2
m2 = m1
while ( n.sqrt((l2-l0)**2+(m2-m0)**2) < large/2.):
centers.append((l2,m2))
l2 = l2-small/2
ii = ii+1
delaycenters = n.array(centers)
if len(delaycenters) == 1:
plural = ''
else:
plural = 's'
print 'For a search area of %.3f and delay beam of %.3f, we will use %d delay beam%s' % (fwhmsurvey, fwhmfield, len(delaycenters), plural)
return delaycenters
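# Usage sketch (mirrors the call in pipe_thread, where fwhmsurvey=0.5 and
# fwhmfield=0.5 by default):
#   delaycenters = calc_hexcenters(fwhmsurvey=0.5, fwhmfield=0.5)
# Each returned (l, m) pair is a delay-beam phase center in the same angular
# units as the FWHMs; making fwhmfield smaller than fwhmsurvey tiles the survey
# area with more beams.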
def detect_bispectra(ba, d, sigma=5., tol=1.3, Q=0, show=0, save=0, verbose=0):
""" Function to detect transient in bispectra
sigma gives the threshold for SNR_bisp (apparent).
tol gives the amount of tolerance in the sigma_b cut for point-like sources (rfi filter).
Q is noise per baseline and can be input. Otherwise estimated from data.
save=0 is no saving, save=1 is save with default name, save=<string>.png uses custom name (must include .png).
"""
# using s=S/Q
mu = lambda s: 1. # for bispectra formed from visibilities
sigbQ3 = lambda s: n.sqrt((1 + 3*mu(s)**2) + 3*(1 + mu(s)**2)*s**2 + 3*s**4) # from kulkarni 1989, normalized by Q**3, also rogers et al 1995
s = lambda basnr, ntr: (2.*basnr/n.sqrt(ntr))**(1/3.) # see rogers et al. 1995 for factor of 2
# measure SNR_bl==Q from sigma clipped times with normal mean and std of bispectra. put into time,dm order
bamean = ba.real.mean(axis=1)
bastd = ba.real.std(axis=1)
(meanmin,meanmax) = lib.sigma_clip(bamean) # remove rfi to estimate noise-like parts
(stdmin,stdmax) = lib.sigma_clip(bastd)
clipped = n.where((bamean > meanmin) & (bamean < meanmax) & (bastd > stdmin) & (bastd < stdmax) & (bamean != 0.0))[0] # remove rfi and zeros
bameanstd = ba[clipped].real.mean(axis=1).std()
basnr = bamean/bameanstd # = S**3/(Q**3 / n.sqrt(n_tr)) = s**3 * n.sqrt(n_tr)
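# Worked scaling example (illustrative numbers): inverting the s() lambda above,
# a per-baseline SNR s = S/Q = 1 with n_tr = 120 closure triples corresponds to
# basnr = s**3 * sqrt(n_tr) / 2 ~ 5.5, i.e. right around a sigma=5 threshold.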
if Q and verbose:
print 'Using given Q =', Q
else:
Q = ((bameanstd/2.)*n.sqrt(d['ntr']))**(1/3.)
if verbose:
print 'Estimating noise per baseline from data. Q (per DM) =', Q
# detect
cands = n.where( (bastd/Q**3 < tol*sigbQ3(s(basnr, d['ntr']))) & (basnr > sigma) ) # get compact sources with high snr
if show or save:
p.figure()
ax = p.axes()
p.subplot(211)
p.title(str(d['nskip']) + ' nskip, ' + str(len(cands[0]))+' candidates', transform = ax.transAxes)
p.plot(basnr, 'b.')
if len(cands[0]) > 0:
p.plot(cands, basnr[cands], 'r*')
p.ylim(-2*basnr[cands].max(),2*basnr[cands].max())
p.xlabel('Integration',fontsize=12,fontweight="bold")
p.ylabel('SNR_b',fontsize=12,fontweight="bold")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_position(('outward', 20))
ax.spines['left'].set_position(('outward', 30))
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
p.subplot(212)
p.plot(bastd/Q**3, basnr, 'b.')
# plot reference theory lines
smax = s(basnr.max(), d['nants'])
sarr = smax*n.arange(0,101)/100.
p.plot(sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(d['ntr']), 'k')
p.plot(tol*sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(d['ntr']), 'k--')
if len(cands[0]) > 0:
p.plot(bastd[cands]/Q**3, basnr[cands], 'r*')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_position(('outward', 20))
ax.spines['left'].set_position(('outward', 30))
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
if len(cands[0]) > 0:
p.axis([0, tol*sigbQ3(s(basnr[cands].max(), d['nants'])), -0.5*basnr[cands].max(), 1.1*basnr[cands].max()])
# show spectral modulation next to each point
for candint in cands:
sm = n.single(round(specmod(data, d, candint),1))
p.text(bastd[candint]/Q**3, basnr[candint], str(sm), horizontalalignment='right', verticalalignment='bottom')
p.xlabel('sigma_b/Q^3',fontsize=12,fontweight="bold")
p.ylabel('SNR_b',fontsize=12,fontweight="bold")
if save:
if save == 1:
savename = d['filename'].split('.')[:-1]
savename.append(str(d['nskip']) + '_bisp.png')
savename = string.join(savename,'.')
elif isinstance(save, types.StringType):
savename = save
print 'Saving file as ', savename
p.savefig(savename) # module-level function, so no self.pathout here; save to the working directory
return cands[0], basnr, bastd, Q
def estimate_noiseperbl(data0):
""" Takes large data array and sigma clips it to find noise per bl for input to detect_bispectra.
Takes mean across pols and channels for now, as in detect_bispectra.
"""
# define noise per baseline for data seen by detect_bispectra or image
data0mean = data0.mean(axis=2).imag # use imaginary part to estimate noise without calibrated, on-axis signal
(data0meanmin, data0meanmax) = lib.sigma_clip(data0mean.flatten())
good = n.where( (data0mean>data0meanmin) & (data0mean<data0meanmax) )
noiseperbl = data0mean[good].std() # measure single noise for input to detect_bispectra
print 'Sigma clip of %.3f to %.3f keeps %d%% of data' % (data0meanmin, data0meanmax, (100.*len(good[0]))/len(data0mean.flatten()))
print 'Estimate of noise per baseline: %.3f' % noiseperbl
return noiseperbl
def save(d, cands, verbose=0):
""" Save all candidates in pkl file for later aggregation and filtering.
"""
if len(cands):
loclist = []
proplist = []
for cand in cands:
# first unpack
# (beamnum, dtind, i, dmind, snr, img, specpol) = cand # big set
(beamnum, dtind, i, dmind, snr, lm, snr2, lm2) = cand # midsize set
# then build list to dump
loclist.append( [beamnum, dtind, i, dmind] )
# proplist.append( (snr, img, specpol) ) # full set
proplist.append( [snr, lm[0], lm[1], snr2, lm2[0], lm2[1]] ) # midsize set
if verbose:
print loclist, proplist
# save candidate info in pickle file
pkl = open(d['candsfile'], 'a')
pickle.dump((loclist, proplist), pkl)
pkl.close()
else:
if verbose:
print 'No cands to save...'
def imgallloop(d, dmind, dtind, beamnum):
""" Parallelizable function for imaging a chunk of data for a single dm.
runs cython qimg library for image, then filters results based on spectral modulation of candidate.
"""
# THIS ONE does the uv gridding and searches for the candidates
# NOTE, the qimg_cython.pyx defines all the different imaging algorithms Casey has written.
twindow = 15 # window to save for plotting data in pickle
# dedisperse using global 'data'
data0 = dataprep(d, dmind, dtind) # returns masked array of dedispersed and stitched data
ims,snr,candints = qimg.imgallfullfilterxy(n.outer(u[d['iterint']/2], d['freq']/d['freq_orig'][0]), n.outer(v[d['iterint']/2], d['freq']/d['freq_orig'][0]), data0.data, d['sizex'], d['sizey'], d['res'], d['sigma_image'])
# IF WE FOUND CANDIDATES, MAKE THEIR CANDIDATE PLOTS
if len(candints) > 0:
# spectra = []
goodcandints = []
lmarr = []; lm2arr = []
snr2arr = []
for i in xrange(len(candints)):
# phase shift to get dynamic spectrum
peakl, peakm = n.where(ims[i] == ims[i].max()) # assumes new style u->u and v->v gridding
l1 = (float((d['sizex'])/d['res'])/2. - peakl[0])/d['sizex']
m1 = (float((d['sizey'])/d['res'])/2. - peakm[0])/d['sizey']
if d['secondaryfilter'] == 'specmod': # filter by spectral modulation
# return spectrogram per pol
# minint = max(candints[i]-twindow, 0)
# maxint = min(candints[i]+twindow, len(data0))
data0_snip = data0[candints[i]].copy() # get candidate integration
lib.phaseshift_threaded(data0_snip[None,:,:,:], d, l1, m1, u[candints[i]], v[candints[i]])
# snipint = min(candints[i],twindow) # correct for edge effects
# print i, candints[i], minint, maxint, peakl, peakm, snipint, data0_snip.mean()
bfspec = data0_snip.mean(axis=0).mean(axis=1).real # mean over bl and pol for known int
# bflc = data0_snip.mean(axis=3).mean(axis=2).mean(axis=1).real # mean over ch, bl and pol
# snrlc = bflc[snipint] / bflc[range(0,snipint-1)+range(snipint+2,twindow)].std() # lc snr of event. more accurate than img snr
sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )
if sm < n.sqrt(d['nchan']/snr[i]**2 + d['specmodfilter']):
print 'Got one! Int=%d, DM=%d, dt=%d, SNR_im=%.1f @ (%d,%d), SM=%.1f < %.1f, so keeping candidate.' % (d['nskip']+d['itercount']+candints[i]*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], peakl, peakm, sm, n.sqrt(d['nchan']/snr[i]**2 + d['specmodfilter']))
# spectra.append(data0_snip.mean(axis=1)) # get real part of spectrogram of candidate
goodcandints.append(i)
lmarr.append( (l1, m1) )
else:
print 'Almost... Int=%d, DM=%d, dt=%d, SNR_im=%.1f @ (%d,%d), SM=%.1f > %.1f, so rejecting candidate.' % (d['nskip']+d['itercount']+candints[i]*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], peakl, peakm, sm, n.sqrt(d['nchan']/snr[i]**2 + d['specmodfilter']))
elif d['secondaryfilter'] == 'fullim': # filter with an image of all data
im2 = qimg.imgonefullxy(n.outer(u[candints[i]], d['freq']/d['freq_orig'][0]), n.outer(v[candints[i]], d['freq']/d['freq_orig'][0]), data0.data[candints[i]], d['full_sizex'], d['full_sizey'], d['res'])
snr2 = im2.max()/im2.std()
peakl2, peakm2 = n.where(im2 == im2.max()) # assumes new style u->u and v->v gridding
l2 = (float((d['full_sizex'])/d['res'])/2. - peakl2[0])/d['full_sizex']
m2 = (float((d['full_sizey'])/d['res'])/2. - peakm2[0])/d['full_sizey']
if snr2 > d['sigma_image']:
print 'Got one! Int=%d, DM=%d, dt=%d: SNR_im=%.1f @ (%.2e,%.2e) and SNR2=%.1f @ (%.2e, %.2e), so keeping candidate.' % (d['nskip']+d['itercount']+candints[i]*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], l1, m1, snr2, l2, m2)
goodcandints.append(i)
lmarr.append( (l1, m1) )
lm2arr.append( (l2, m2) )
snr2arr.append(snr2)
else:
print 'Almost... Int=%d, DM=%d, dt=%d: SNR_im=%.1f @ (%.2e,%.2e) and SNR2=%.1f @ (%.2e, %.2e), so rejecting candidate.' % (d['nskip']+d['itercount']+candints[i]*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], l1, m1, snr2, l2, m2)
if d['secondaryfilter'] == 'specmod': # filter by spectral modulation
return [(beamnum, dtind, d['nskip']+d['itercount']+candints[goodcandints[i]]*d['dtarr'][dtind], dmind, snr[goodcandints[i]], lmarr[i]) for i in xrange(len(goodcandints))] # smaller data returned
elif d['secondaryfilter'] == 'fullim': # filter with an image of all data
return [(beamnum, dtind, d['nskip']+d['itercount']+candints[goodcandints[i]]*d['dtarr'][dtind], dmind, snr[goodcandints[i]], lmarr[i], snr2arr[i], lm2arr[i]) for i in xrange(len(goodcandints))] # smaller data returned
# return [(beamnum, dtind, d['nskip']+d['itercount']+candints[goodcandints[i]]*d['dtarr'][dtind], dmind, snr[goodcandints[i]], ims[goodcandints[i]], spectra[i]) for i in xrange(len(goodcandints))] # return data coods (delta_t, int, dm) and properties (snr, image, spectrum*pol)
else:
return 0
# return ( n.empty( (0,7) ) )
def time_filter(data0, d, width, show=0):
""" Replaces data array with filtered version via convolution in time. Note that this has trouble with zeroed data.
kernel specifies the convolution kernel. 'm' for mexican hat (a.k.a. ricker, effectively does bg subtraction), 'g' for gaussian. 't' for a tophat. 'b' is a tophat with bg subtraction (or square 'm'). 'w' is a tophat with width that varies with channel, as kept in 'twidth[dmind]'.
width is the kernel width with length nchan. should be tuned to expected pulse width in each channel.
bgwindow is used by 'b' only.
An alternate design for this method would be to make a new data array after filtering, so this could be repeated for many assumed widths without reading data anew. That would require more memory, so going with replacement for now.
"""
kernel = d['filtershape']
bgwindow = d['bgwindow']
if not isinstance(width, types.ListType):
width = [width] * len(d['chans'])
# time filter by convolution. kernels have different normalizations: 'm' has central peak integral=1 and total integral=0; the others integrate to 1, so they don't do bg subtraction.
kernelset = {} # optionally could make set of kernels. one per width needed. (used only by 'w' for now).
if kernel == 't':
print 'Applying tophat time filter.'
for w in n.unique(width):
kernel = n.zeros(len(data0)) # tophat.
onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(ceil(w/2.)))
kernel[onrange] = 1.
kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize so the tophat integrates to 1 (no bg subtraction for this kernel).
elif kernel == 'b':
print 'Applying tophat time filter with bg subtraction (square mexican hat) total width=%d.' % (bgwindow)
for w in n.unique(width):
kernel = n.zeros(len(data0)) # tophat.
onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(ceil(w/2.)))
kernel[onrange] = 1.
offrange = range(len(kernel)/2 - (bgwindow+w)/2, len(kernel)/2-w/2) + range(len(kernel)/2 + int(ceil(w/2.)), len(kernel)/2 + int(ceil((w+bgwindow)/2.)))
kernel[offrange] = -1.
posnorm = n.where(kernel>0, kernel, 0).sum() # find normalization of positive
negnorm = n.abs(n.where(kernel<0, kernel, 0).sum()) # find normalization of negative
kernelset[w] = n.where(kernel>0, kernel/posnorm, kernel/negnorm) # pos and neg both sum to 1/-1, so total integral=0
elif kernel == 'g':
print 'Applying gaussian time filter. Note that effective width is much larger than equivalent tophat width.'
for w in n.unique(width):
kernel = signal.gaussian(len(data0), w) # gaussian. peak not quite at 1 for widths less than 3, so it is later renormalized.
kernelset[w] = kernel / (w * n.sqrt(2*n.pi)) # normalize to pdf, not peak of 1.
elif kernel == 'w':
print 'Applying tophat time filter that varies with channel.'
for w in n.unique(width):
kernel = n.zeros(len(data0)) # tophat.
onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(ceil(w/2.)))
kernel[onrange] = 1.
kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize so the tophat integrates to 1 (no bg subtraction for this kernel).
elif kernel is None:
print 'Applying no time filter.'
return data0
if show:
for kernel in kernelset.values():
p.plot(kernel,'.')
p.title('Time filter kernel')
p.show()
# take ffts (in time)
datafft = fft.fft(data0, axis=0)
# kernelsetfft = {}
# for w in n.unique(width):
# kernelsetfft[w] = fft.fft(n.roll(kernelset[w], len(data0)/2)) # seemingly need to shift kernel to have peak centered near first bin if convolving complex array (but not for real array?)
# **take first kernel. assumes single width in hacky way**
kernelsetfft = fft.fft(n.roll(kernelset[kernelset.keys()[0]], len(data0)/2)) # seemingly need to shift kernel to have peak centered near first bin if convolving complex array (but not for real array?)
# filter by product in fourier space
# for i in range(d['nbl']): # **can't find matrix product I need, so iterating over nbl, chans, npol**
# for j in range(len(d['chans'])):
# for k in range(d['npol']):
# datafft[:,i,j,k] = datafft[:,i,j,k]*kernelsetfft[width[j]] # index fft kernel by twidth
datafft = datafft * kernelsetfft[:,None,None,None]
# ifft to restore time series
# return n.ma.masked_array(fft.ifft(datafft, axis=0), self.flags[:self.nints,:, self.chans,:] == 0)
return n.array(fft.ifft(datafft, axis=0))
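# Usage sketch (mirrors the call in readloop, which passes a width of 1
# integration; 'b' selects the background-subtracted tophat and bgwindow is
# set to 10 in pipe_thread):
#   d['filtershape'] = 'b'; d['bgwindow'] = 10
#   data0 = time_filter(data0, d, 1)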
def specmod(data0, d, ii):
"""Calculate spectral modulation for given track.
Spectral modulation is essentially the standard deviation of a spectrum divided by its mean.
This helps quantify whether the flux is located in a narrow number of channels or across all channels.
Broadband signal has small modulation (<sqrt(nchan)/SNR) while RFI has larger values.
See Spitler et al 2012 for details.
"""
bfspec = data0[ii].mean(axis=0).mean(axis=1).real # mean over bl and pol
sm = n.sqrt( ((bfspec**2).mean() - bfspec.mean()**2) / bfspec.mean()**2 )
return sm
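# Worked example of the filter threshold (illustrative numbers): a broadband
# pulse detected at image SNR ~ 8 across nchan = 64 channels is expected to have
# sm <~ sqrt(nchan)/SNR = 8/8 = 1, while narrowband RFI concentrated in a few
# channels gives sm >> 1 and is rejected by imgallloop's specmod filter.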
def readprep(d):
""" Prepare to read data
"""
filename = d['filename']; spw = d['spw']; iterint = d['iterint']; datacol = d['datacol']; selectpol = d['selectpol']
scan = d['scan']; nints = d['nints']; nskip = d['nskip']
# read metadata either from pickle or ms file
pklname = string.join(filename.split('.')[:-1], '.') + '_init.pkl'
if os.path.exists(pklname):
print 'Pickle of initializing info found. Loading...'
pkl = open(pklname, 'r')
try:
(d['npol_orig'], d['nbl'], d['blarr'], d['inttime'], spwinfo, scansummary) = pickle.load(pkl)
except EOFError:
print 'Bad pickle file. Exiting...'
return 1
scanlist = sorted(scansummary.keys())
starttime_mjd = scansummary[scanlist[scan]]['0']['BeginTime']
else:
print 'No pickle of initializing info found. Making anew...'
pkl = open(pklname, 'wb')
ms.open(filename)
spwinfo = ms.getspectralwindowinfo()
scansummary = ms.getscansummary()
ms.selectinit(datadescid=0) # reset select params for later data selection
selection = {'uvdist': [1., 1e10]} # exclude auto-corrs
ms.select(items = selection)
ms.selectpolarization(selectpol)
scanlist = sorted(scansummary.keys())
starttime_mjd = scansummary[scanlist[scan]]['0']['BeginTime']
d['inttime'] = scansummary[scanlist[scan]]['0']['IntegrationTime']
print 'Initializing integration time (s):', d['inttime']
ms.iterinit(['TIME'], iterint*d['inttime'])
ms.iterorigin()
da = ms.getdata([datacol, 'axis_info'], ifraxis=True)
ms.close()
d['nbl'] = da[datacol].shape[2]
bls = da['axis_info']['ifr_axis']['ifr_shortname']
d['blarr'] = n.array([[int(bls[i].split('-')[0]),int(bls[i].split('-')[1])] for i in xrange(len(bls))])
# d['npol'] = len(selectpol)
d['npol_orig'] = da[datacol].shape[0]
print 'Initializing %d polarizations' % (d['npol'])
pickle.dump((d['npol_orig'], d['nbl'], d['blarr'], d['inttime'], spwinfo, scansummary), pkl)
pkl.close()
# set ants
if len(d['excludeants']):
print 'Excluding ant(s) %s' % d['excludeants']
antlist = list(n.unique(d['blarr']))
d['ants'] = [ant for ant in range(len(antlist)) if antlist[ant] not in d['excludeants']]
d['blarr'] = n.array( [(ant1,ant2) for (ant1,ant2) in d['blarr'] if ((ant1 not in d['excludeants']) and (ant2 not in d['excludeants']))] )
d['nbl'] = len(d['blarr'])
d['nants'] = len(n.unique(d['blarr']))
print 'Initializing nants:', d['nants']
print 'Initializing nbl:', d['nbl']
# define list of spw keys (may not be in order!)
freqs = []
for i in spwinfo.keys():
freqs.append(spwinfo[i]['Chan1Freq'])
d['spwlist'] = n.array(sorted(zip(freqs, spwinfo.keys())))[:,1][spw].astype(int) # spwlist defines order of spw to iterate in freq order
d['freq_orig'] = n.array([])
for spw in d['spwlist']:
nch = spwinfo[str(spw)]['NumChan']
ch0 = spwinfo[str(spw)]['Chan1Freq']
chw = spwinfo[str(spw)]['ChanWidth']
d['freq_orig'] = n.concatenate( (d['freq_orig'], (ch0 + chw * n.arange(nch)) * 1e-9) ).astype('float32')
d['freq'] = d['freq_orig'][d['chans']]
d['nchan'] = len(d['chans'])
print 'Initializing nchan:', d['nchan']
# set requested time range based on given parameters
timeskip = d['inttime']*nskip
starttime = qa.getvalue(qa.convert(qa.time(qa.quantity(starttime_mjd+timeskip/(24.*60*60),'d'),form=['ymd'], prec=9)[0], 's'))[0]
stoptime = qa.getvalue(qa.convert(qa.time(qa.quantity(starttime_mjd+(timeskip+(nints+1)*d['inttime'])/(24.*60*60), 'd'), form=['ymd'], prec=9)[0], 's'))[0] # nints+1 to avoid the buffer running out and stalling iteration
print 'First integration of scan:', qa.time(qa.quantity(starttime_mjd,'d'),form=['ymd'],prec=9)[0]
print
print 'Reading scan', str(scanlist[scan]) ,'for times', qa.time(qa.quantity(starttime_mjd+timeskip/(24.*60*60),'d'),form=['hms'], prec=9)[0], 'to', qa.time(qa.quantity(starttime_mjd+(timeskip+(nints+1)*d['inttime'])/(24.*60*60), 'd'), form=['hms'], prec=9)[0]
# read data into data structure
ms.open(filename)
if len(d['spwlist']) == 1:
ms.selectinit(datadescid=d['spwlist'][0])
else:
ms.selectinit(datadescid=0, reset=True) # reset includes spw in iteration over time
selection = {'time': [starttime, stoptime], 'uvdist': [1., 1e10], 'antenna1': d['ants'], 'antenna2': d['ants']} # exclude auto-corrs
ms.select(items = selection)
ms.selectpolarization(selectpol)
ms.iterinit(['TIME'], iterint*d['inttime'], 0, adddefaultsortcolumns=False) # read with a bit of padding to get at least nints
iterstatus = ms.iterorigin()
d['itercount1'] = 0
d['l0'] = 0.; d['m0'] = 0.
# find full res/size and set actual res/size
d['full_res'] = n.round(25./(3e-1/d['freq'][len(d['freq'])/2])/2).astype('int') # full field of view. assumes freq in GHz
#set actual res/size
if d['res'] == 0:
d['res'] = d['full_res']
da = ms.getdata(['u','v','w'])
uu = n.outer(da['u'], d['freq']).flatten() * (1e9/3e8)
vv = n.outer(da['v'], d['freq']).flatten() * (1e9/3e8)
# **this may let vis slip out of bounds. should really define grid out to 2*max(abs(u)) and 2*max(abs(v)). in practice, very few are lost.**
powers = n.fromfunction(lambda i,j: 2**i*3**j, (12,8), dtype='int') # power array for 2**i * 3**j
rangex = n.round(uu.max() - uu.min()).astype('int')
rangey = n.round(vv.max() - vv.min()).astype('int')
largerx = n.where(powers-rangex/d['res'] > 0, powers, powers[-1,-1])
p2x, p3x = n.where(largerx == largerx.min())
largery = n.where(powers-rangey/d['res'] > 0, powers, powers[-1,-1])
p2y, p3y = n.where(largery == largery.min())
d['full_sizex'] = ((2**p2x * 3**p3x)*d['res'])[0]
d['full_sizey'] = ((2**p2y * 3**p3y)*d['res'])[0]
print 'Ideal uvgrid size=(%d,%d) for res=%d' % (d['full_sizex'], d['full_sizey'], d['res'])
if d['size'] == 0:
d['sizex'] = d['full_sizex']
d['sizey'] = d['full_sizey']
print 'Using uvgrid size=(%d,%d) (2**(%d,%d)*3**(%d,%d) = (%d,%d)) and res=%d' % (d['sizex'], d['sizey'], p2x, p2y, p3x, p3y, 2**p2x*3**p3x, 2**p2y*3**p3y, d['res'])
else:
d['sizex'] = d['size']
d['sizey'] = d['size']
print 'Using uvgrid size=(%d,%d) and res=%d' % (d['sizex'], d['sizey'], d['res'])
d['size'] = max(d['sizex'], d['sizey'])
print 'Image memory usage for %d threads is %d GB' % (d['nthreads'], 8 * d['sizex']/d['res'] * d['sizey']/d['res'] * d['iterint'] * d['nthreads']/1024**3)
return iterstatus
def readiter(d):
""" Iterates over ms.
Returns everything needed for analysis as tuple.
"""
da = ms.getdata([d['datacol'],'axis_info','u','v','w','flag','data_desc_id'], ifraxis=True)
# spws = n.unique(da['data_desc_id']) # spw in use
# good = n.where((da['data_desc_id']) == spws[0])[0] # take first spw
good = n.where((da['data_desc_id']) == d['spwlist'][0])[0] # take first spw
time0 = da['axis_info']['time_axis']['MJDseconds'][good]
data0 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]
if d['telcalfile']: # apply telcal solutions
if len(d['spwlist']) > 1:
spwbin = d['spwlist'][0]
else:
spwbin = 0
chanfreq = da['axis_info']['freq_axis']['chan_freq'][:,spwbin]
sols = applytelcal.solutions(d['telcalfile'], chanfreq)
for i in range(len(d['selectpol'])):
try:
sols.setselection(d['telcalcalibrator'], time0[0]/(24*3600), d['selectpol'][i], verbose=0) # chooses solutions closest in time that match pol and source name
sols.apply(data0, d['blarr'], i)
print 'Applied cal for spw %d and pol %s' % (spwbin, d['selectpol'][i])
except:
pass
flag0 = n.transpose(da['flag'], axes=[3,2,1,0])[good]
u0 = da['u'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8) # uvw are in m, so divide by wavelength of first chan to set in lambda
v0 = da['v'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)
w0 = da['w'].transpose()[good] * d['freq_orig'][0] * (1e9/3e8)
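# Worked number for the conversion above (illustrative): freq_orig is stored in
# GHz, so freq_orig[0]*(1e9/3e8) is f/c in 1/m; at 1.4 GHz a 1000 m baseline
# becomes ~4670 wavelengths.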
if len(d['spwlist']) > 1:
for spw in d['spwlist'][1:]:
good = n.where((da['data_desc_id']) == spw)[0]
data1 = n.transpose(da[d['datacol']], axes=[3,2,1,0])[good]
if d['telcalfile']: # apply telcal solutions
chanfreq = da['axis_info']['freq_axis']['chan_freq'][:,spw]
sols = applytelcal.solutions(d['telcalfile'], chanfreq)
for i in range(len(d['selectpol'])):
try:
sols.setselection(d['telcalcalibrator'], time0[0]/(24*3600), d['selectpol'][i], verbose=0) # chooses solutions closest in time that match pol and source name
sols.apply(data1, d['blarr'], i)
print 'Applied cal for spw %d and pol %s' % (spw, d['selectpol'][i])
except:
pass
data0 = n.concatenate( (data0, data1), axis=2 )
flag0 = n.concatenate( (flag0, n.transpose(da['flag'], axes=[3,2,1,0])[good]), axis=2 )
del da
data0 = data0[:,:,d['chans'],:] * n.invert(flag0[:,:,d['chans'],:]) # flag==1 means bad data (for vla)
if d['gainfile']:
sols = applycals2.solutions(d['gainfile'], flagants=d['flagantsol'])
sols.parsebp(d['bpfile'])
# sols.setselection(time0[0]/(24*3600.), d['freq']*1e9, d['spw'], d['selectpol'])
sols.setselection(time0[0]/(24*3600.), d['freq']*1e9) # only dualpol, 2sb mode implemented
sols.apply(data0, d['blarr'])
d['iterstatus1'] = ms.iternext()
return data0.astype('complex64'), u0.astype('float32'), v0.astype('float32'), w0.astype('float32'), time0.astype('float32')
def dataprep(d, dmind, dtind, usetrim=True):
""" Takes most recent data read and dedisperses with white space. also adds previously trimmed data.
data2 is next iteration of data of size iterint by ...
usetrim is default behavior, but can be turned off to have large single-segment reading to reproduce cands.
"""
dt = d['dtarr'][dtind]
if d['datadelay'][dmind] >= dt: # if doing dedispersion...
data2 = n.concatenate( (n.zeros( (d['datadelay'][dmind], d['nbl'], d['nchan'], d['npol']), dtype='complex64'), data), axis=0) # prepend with zeros of length maximal dm delay
lib.dedisperse_resample(data2, d['freq'], d['inttime'], d['dmarr'][dmind], dt, verbose=0) # dedisperses data.
if usetrim:
for i in xrange(len(datatrim[dmind][dtind])):
data2[i] = data2[i] + datatrim[dmind][dtind][i]
datatrim[dmind][dtind][:] = data2[d['iterint']/dt: d['iterint']/dt + len(datatrim[dmind][dtind])]
return n.ma.masked_array(data2[:d['iterint']/dt], data2[:d['iterint']/dt] == 0j)
else: # if no dedispersion
data2 = data.copy()
lib.dedisperse_resample(data2, d['freq'], d['inttime'], d['dmarr'][dmind], dt, verbose=0) # only resample data
return n.ma.masked_array(data2[:d['iterint']/dt], data2[:d['iterint']/dt] == 0j)
def readloop(d, eproc, emove):
""" Data generating stage of parallel data function.
data is either read into 'data' buffer, when ready
this keeps data reading bottleneck to 1x the read time.
"""
# now start main work of readloop
iterint = d['iterint']; nbl = d['nbl']; nchan = d['nchan']; npol = d['npol']
# data1_mem = mp.sharedctypes.RawArray(ctypes.c_float, (d['iterint']*d['nbl']*d['nchan']*d['npol']*2)) # x2 to store complex values in single array
datacal_mem = mp.Array(ctypes.c_float, (iterint*nbl*nchan*len(d['selectpol'])*2)) # x2 to store complex values in single array
datacal = numpyview(datacal_mem, 'complex64', (iterint, nbl, nchan, len(d['selectpol'])))
datacap, ucap, vcap, wcap, timecap = readiter(d) # read "cap", a hack to make sure any single iteration has enough integrations (artifact of irregular inttime)
print 'Read first iteration with shape', datacap.shape
while 1:
# name = mp.current_process().name
# print '%s: filling buffer' % name
datanext, unext, vnext, wnext, timenext = readiter(d)
print 'Read next %d ints from iter %d' % (len(datanext), d['itercount1']+iterint)
datanext = n.vstack((datacap,datanext))
unext = n.vstack((ucap,unext))
vnext = n.vstack((vcap,vnext))
wnext = n.vstack((wcap,wnext))
timenext = n.concatenate((timecap,timenext))
if ((len(datanext) < iterint) and d['iterstatus1']): # read once more if data buffer is too small. don't read if no data! iterator gets confused.
datanext2, unext2, vnext2, wnext2, timenext2 = readiter(d)
print 'Read another %d ints for iter %d' % (len(datanext2), d['itercount1']+iterint)
datanext = n.vstack((datanext,datanext2))
unext = n.vstack((unext,unext2))
vnext = n.vstack((vnext,vnext2))
wnext = n.vstack((wnext,wnext2))
timenext = n.concatenate((timenext,timenext2))
del datanext2, unext2, vnext2, wnext2, timenext2 # clean up
# select just the next iteration's worth of data and metadata. leave rest for next iteration's buffer cap.
if len(datanext) >= iterint:
datacal[:] = datanext[:iterint]
datacap = datanext[iterint:] # save rest for next iteration
u1 = unext[:iterint]
ucap = unext[iterint:]
v1 = vnext[:iterint]
vcap = vnext[iterint:]
w1 = wnext[:iterint]
wcap = wnext[iterint:]
time1 = timenext[:iterint]
timecap = timenext[iterint:]
# optionally can insert transient here
# lib.phaseshift(data1, d, n.radians(0.1), n.radians(0.), u, v) # phase shifts data in place
# data1[100] = data1[100] + 10+0j
# lib.phaseshift(data1, d, n.radians(0.), n.radians(0.1), u, v) # phase shifts data in place
# flag data before moving into place
# bg subtract in time
if d['filtershape']:
if d['filtershape'] == 'z': # 'z' means do zero-mean subtraction in time
pass
else: # otherwise do fft convolution
datacal = time_filter(datacal, d, 1) # assumes pulse width of 1 integration
# flag data
if (d['flagmode'] == 'standard'):
lib.dataflag(datacal, d, 2.5, convergence=0.05, mode='badch')
lib.dataflag(datacal, d, 3., mode='badap')
lib.dataflag(datacal, d, 4., convergence=0.1, mode='blstd')
lib.dataflag(datacal, d, 4., mode='ring')
else:
print 'No real-time flagging.'
if d['filtershape'] == 'z':
print 'Subtracting mean visibility in time...'
lib.meantsub(datacal)
# write noise pkl with: itercount, noiseperbl, zerofrac, imstd_midtdm0
noiseperbl = estimate_noiseperbl(datacal)
if d['savecands'] and n.any(datacal[d['iterint']/2]):
imstd = qimg.imgonefullxy(n.outer(u1[d['iterint']/2], d['freq']/d['freq_orig'][0]), n.outer(v1[d['iterint']/2], d['freq']/d['freq_orig'][0]), datacal[d['iterint']/2], d['sizex'], d['sizey'], d['res']).std()
zerofrac = float(len(n.where(datacal == 0j)[0]))/datacal.size
noisefile = 'noise_' + string.join(d['candsfile'].split('_')[1:-1], '_') + '.pkl'
pkl = open(noisefile,'a')
pickle.dump( (d['itercount1'], noiseperbl, zerofrac, imstd), pkl )
pkl.close()
# after cal and flagging, can optionally average to Stokes I to save memory
# do this after measuring noise, etc to keep zero counting correct in imaging
if 'lowmem' in d['searchtype']:
datacal[...,0] = datacal.sum(axis=3)
# emove is THE MOVE EVENT THAT WAITS FOR PROCESSOR TO TELL IT TO GO
# wait for signal to move everything to processing buffers
print 'Ready to move data into place for itercount ', d['itercount1']
emove.wait()
emove.clear()
if 'lowmem' in d['searchtype']:
data[...,0] = datacal[...,0]
else:
data[:] = datacal[:]
# flag[:] = flag1[:]
u[:] = u1[:]
v[:] = v1[:]
w[:] = w1[:]
time[:] = time1[:]
d['itercount'] = d['itercount1']
d['iterstatus'] = d['iterstatus1']
d['itercount1'] += iterint
eproc.set() # reading buffer filled, start processing
# NOW MAKE SURE ALL ENDS GRACEFULLY
else:
print 'End of data (in buffer)'
d['iterstatus'] = False # to force processloop to end
eproc.set()
ms.iterend()
ms.close()
break
if not d['iterstatus']: # using iterstatus1 is more conservative here. trying to get around hangup on darwin.
print 'End of data (iterator)'
eproc.set()
ms.iterend()
ms.close()
break
def readtriggerloop(d, eproc, emove):
""" Defined purely to trigger readloop to continue without starting processloop
"""
while 1:
eproc.wait()
eproc.clear()
print 'Iterating readloop...'
emove.set()
if not d['iterstatus']: # using iterstatus1 is more conservative here. trying to get around hangup on darwin.
print 'End of data (iterator)'
eproc.set()
ms.iterend()
ms.close()
break
def processloop(d, eproc, emove):
""" Processing stage of parallel data function.
Only processes from data. Assumes a "first in, first out" model, where 'data' defines next buffer to process.
Event triggered by readloop when 'data' is filled.
"""
while 1:
eproc.wait()
eproc.clear()
print 'Processing for itercount %d. ' % (d['itercount'])
# name = mp.current_process().name
# print '%s: processing data' % name
# optionally can flag or insert transients here. done in readloop to improve parallelization
# lib.phaseshift(data, d, n.radians(0.1), n.radians(0.), u, v) # phase shifts data in place
# data[100] = data[100] + 10+0j
# lib.phaseshift(data, d, n.radians(0.), n.radians(0.1), u, v) # phase shifts data in place
# lib.dataflag(datacal, sigma=1000., mode='blstd')
beamnum = 0
resultlist = []
# SUBMITTING THE LOOPS
pool = mp.Pool(processes=d['nthreads']) # reserve one for reading. also one for processloop?
if n.any(data):
for dmind in xrange(len(d['dmarr'])):
print 'Processing DM = %d (max %d)' % (d['dmarr'][dmind], d['dmarr'][-1])
for dtind in xrange(len(d['dtarr'])):
result = pool.apply_async(imgallloop, [d, dmind, dtind, beamnum])
resultlist.append(result)
else:
print 'Data for processing is zeros. Moving on...'
# COLLECTING THE RESULTS
candslist = []
for i in xrange(len(resultlist)):
results = resultlist[i].get()
if results:
for j in xrange(len(results)):
candslist.append(results[j])
print 'Adding %d from itercount %d of %s. ' % (len(candslist), d['itercount'], d['filename'])
# if the readloop has run out of data, close down processloop, else continue
if not d['iterstatus']:
pool.close()
pool.join()
if d['savecands']:
save(d, candslist)
emove.set() # clear up any loose ends
print 'End of processloop'
break
else: # we're continuing, so signal data move, then save cands
emove.set()
pool.close()
pool.join()
if d['savecands']:
save(d, candslist)
def readloop2(d, eproc, emove):
""" Profiles readloop
"""
cProfile.runctx('readloop(d, eproc, emove)', globals(), locals(), 'readloop.prof')
def processloop2(d, eproc, emove):
""" Profiles processloop
"""
cProfile.runctx('processloop(d, eproc, emove)', globals(), locals(), 'processloop.prof')
def calc_dmlist(dm_lo,dm_hi,t_samp,t_intr,b_chan,ctr_freq,n_chans,tolerance=1.25):
"""
This procedure runs the HTRU-style calculation of DM trial steps.
Input parameters:
- Lowest DM desired
- Highest DM desired
- tsamp
- intrinsic pulse width
- bandwidth of single channel
- center freq
- n channels
- tolerance of how much you're willing to smear out your signal (in units of ideal sample time)
"""
dmarr = []
dm = dm_lo
while (dm <= dm_hi):
dmarr.append(dm)
old_dm = dm
ch_fac = 8.3*b_chan/(ctr_freq*ctr_freq*ctr_freq)
bw_fac = 8.3*b_chan*n_chans/4/(ctr_freq*ctr_freq*ctr_freq)
t00 = n.sqrt(t_samp*t_samp + t_intr*t_intr + (dm*ch_fac)**2)
tol_fac = tolerance*tolerance*t00*t00 - t_samp*t_samp - t_intr*t_intr
new_dm = (bw_fac*bw_fac*dm + n.sqrt(-1.*(ch_fac*bw_fac*dm)**2. + ch_fac*ch_fac*tol_fac + bw_fac*bw_fac*tol_fac))/(ch_fac**2. + bw_fac**2)
dm = new_dm
return dmarr
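# Usage sketch (hypothetical values ts, chanwidth, fctr; t_samp/t_intr, b_chan,
# and ctr_freq must be given in units consistent with the 8.3 dispersion
# constant used above):
#   dmarr = calc_dmlist(dm_lo=0., dm_hi=100., t_samp=ts, t_intr=ts,
#                       b_chan=chanwidth, ctr_freq=fctr, n_chans=64)
# The trial spacing grows with DM because the intra-channel smearing term
# (dm*ch_fac)**2 eventually dominates the tolerance budget.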
### THIS THREAD IS THE "MAIN"
def pipe_thread(filename, nints=200, nskip=0, iterint=200, spw=[0], chans=range(5,59), dmarr=[0.], dtarr=[1], fwhmsurvey=0.5, fwhmfield=0.5, selectpol=['RR','LL'], scan=0, datacol='data', size=0, res=0, sigma_bisp=6.5, sigma_image=6.5, filtershape=None, secondaryfilter='fullim', specmodfilter=1.5, searchtype='imageall', telcalfile='', telcalcalibrator='', gainfile='', bpfile='', savecands=0, candsfile='', flagmode='standard', flagantsol=True, nthreads=1, wplanes=0, excludeants=[]):
""" Threading for parallel data reading and processing.
Either side can be faster than the other, since data are held for processing in shared buffer.
size/res define uvgrid parameters. if either set to 0, then they are dynamically set to image full field of view and include all visibilities.
searchtype can be 'readonly', '' to do little but setup, or any string to do image search, or include 'lowmem' for low memory version that sums polarizations.
DESCRIPTION OF PARAMETERS:
nints to datacol parameters define data to read
size gives uv extent in N_wavelengths
res chosen to be 50 to cover the full FOV of VLA
sigma_'s tell what threshold to use for bispec or image
filtershape etc. is about matched filtering for candidate detection. 'b' uses conv to subtract bgwindow, 'z' subtracts mean over all times in iterint.
secondaryfilter defines how imaged candidates are filtered ('specmod' or 'fullim' are the options)
specmodfilter IS A FUDGE FACTOR. In qimg, this factor tells you how much to tolerate spectral modulation deviance.
searchtype tells what algorithm to do detection. List defined by Casey. Don't change this, it might break things.
telcal thru bpfile --> options for calibration
savecands is bool to save candidates or not.
candsfile is the prefix used to name candidates.
flagmode defines algorithm to do flagging. applies casa flags always.
flagantsol --> uses CASA antenna flagging or not
nthreads --> size of pool for multithreaded work.
wplanes defines the number of w-planes for w-projection (0 means don't do w-projection)
"""
# set up thread management and shared memory and metadata
global data, datatrim, u, v, w, time
mgr = mp.Manager()
d = mgr.dict()
eproc = mp.Event() # event signalling to begin processing
emove = mp.Event() # event signalling to move data into processing buffers (data, flag, u, v, w, time)
# define basic shared params
d['filename'] = filename
d['spw'] = spw
d['datacol'] = datacol
d['dmarr'] = dmarr
d['dtarr'] = dtarr
d['scan'] = scan
d['nskip'] = nskip
d['nints'] = nints # total ints to iterate over
d['iterint'] = iterint # time step for msiter
d['chans'] = chans
d['nchan'] = len(chans)
d['selectpol'] = selectpol
if 'lowmem' in searchtype:
print 'Running in \'lowmem\' mode. Reading pols %s, then summing after cal, flag, and filter. Flux scale not right if pols asymmetrically flagged.' % selectpol
d['npol'] = 1
else:
d['npol'] = len(selectpol)
d['filtershape'] = filtershape
d['bgwindow'] = 10
d['sigma_bisp'] = sigma_bisp
d['sigma_image'] = sigma_image
d['size'] = size
d['sizex'] = size
d['sizey'] = size
d['res'] = res
d['secondaryfilter'] = secondaryfilter
d['specmodfilter'] = specmodfilter # fudge factor for spectral modulation. 1==ideal, 0==do not apply, >1==non-ideal broad-band signal
d['searchtype'] = searchtype
d['delaycenters'] = calc_hexcenters(fwhmsurvey, fwhmfield)
d['telcalfile'] = telcalfile # telcal file produced by online system
d['telcalcalibrator'] = telcalcalibrator
d['gainfile'] = gainfile
d['bpfile'] = bpfile
d['savecands'] = savecands
d['excludeants'] = excludeants
d['candsfile'] = candsfile
d['flagmode'] = flagmode
d['flagantsol'] = flagantsol
d['nthreads'] = nthreads
d['wplanes'] = wplanes # flag to turn on/off wproj. later overwritten with wplane inv conv kernel
# define basic data state
print 'Preparing to read...'
d['iterstatus'] = readprep(d)
# d['datadelay'] = n.array([[lib.calc_delay(d['freq'], d['inttime']*d['dtarr'][i], d['dmarr'][j]).max() for i in range(len(d['dtarr']))] for j in range(len(d['dmarr']))]) # keep track of delay shift as array indexed with [dmind][dtind]
d['datadelay'] = n.array([lib.calc_delay(d['freq'], d['inttime'], d['dmarr'][i]).max() for i in range(len(d['dmarr']))]) # keep track of delay shift as array indexed with [dmind][dtind]
# time stamp and candidate save file
tt = timestamp.localtime()
d['starttime'] = tt
print 'Start time: %s_%s_%s:%s:%s:%s' % (tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec)
# define candidate file
if d['savecands']:
if not d['candsfile']:
timestring = '%s_%s_%s:%s:%s:%s' % (tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec)
d['candsfile'] = 'cands_'+filename[:-3]+'.pkl'
# d['candsfile'] = 'cands_'+timestring+'.pkl'
picklabledict = d.copy()
pkl = open(d['candsfile'], 'wb')
pickle.dump(picklabledict, pkl)
pkl.close()
# create shared data arrays
print 'Preparing shared memory objects...'
# data_mem = {}
# data = {}
# for dmind in xrange(len(d['dmarr'])):
# data_mem[dmind] = mp.Array(ctypes.c_float, ((d['iterint']/d['resamplearr'][dmind])*d['nbl']*d['nchan']*d['npol']*2)) # x2 to store complex values in single array
# data[dmind] = numpyview(data_mem[dmind], 'complex64', ((d['iterint']/d['resamplearr'][dmind]), d['nbl'], d['nchan'], d['npol']))
# data[dmind][:] = n.zeros(((d['iterint']/d['resamplearr'][dmind]), d['nbl'], d['nchan'], d['npol']))
data_mem = mp.Array(ctypes.c_float, (d['iterint']*d['nbl']*d['nchan']*d['npol']*2)) # x2 to store complex values in single array
data = numpyview(data_mem, 'complex64', (d['iterint'], d['nbl'], d['nchan'], d['npol']))
data[:] = n.zeros((d['iterint'], d['nbl'], d['nchan'], d['npol']))
datatrim = {}; datatrim_mem = {}
totalnint = iterint # start counting size of memory in integrations
for dmind in xrange(len(d['dmarr'])): # save the trimmings!
datatrim[dmind] = {}; datatrim_mem[dmind] = {}
for dtind in xrange(len(d['dtarr'])):
dt = d['dtarr'][dtind]
if d['datadelay'][dmind] >= dt:
datatrim_mem[dmind][dtind] = mp.Array(ctypes.c_float, ((d['datadelay'][dmind]/dt)*d['nbl']*d['nchan']*d['npol']*2)) # x2 to store complex values in single array
datatrim[dmind][dtind] = numpyview(datatrim_mem[dmind][dtind], 'complex64', ((d['datadelay'][dmind]/dt), d['nbl'], d['nchan'], d['npol']))
datatrim[dmind][dtind][:] = n.zeros(((d['datadelay'][dmind]/dt), d['nbl'], d['nchan'], d['npol']), dtype='complex64')
totalnint += d['datadelay'][dmind]/dt
else:
datatrim[dmind][dtind] = n.array([])
print 'Visibility memory usage is %d GB' % (8*(totalnint * d['nbl'] * d['nchan'] * d['npol'])/1024**3) # factor of 2?
# later need to update these too
# flag_mem = mp.Array(ctypes.c_bool, iterint*d['nbl']*d['nchan']*d['npol'])
u_mem = mp.Array(ctypes.c_float, iterint*d['nbl'])
v_mem = mp.Array(ctypes.c_float, iterint*d['nbl'])
w_mem = mp.Array(ctypes.c_float, iterint*d['nbl'])
time_mem = mp.Array(ctypes.c_float, iterint)
# new way is to convert later
# flag = numpyview(flag_mem, 'bool', (iterint, d['nbl'], d['nchan'], d['npol']))
u = numpyview(u_mem, 'float32', (iterint, d['nbl']))
v = numpyview(v_mem, 'float32', (iterint, d['nbl']))
w = numpyview(w_mem, 'float32', (iterint, d['nbl']))
time = numpyview(time_mem, 'float32', (iterint))
print 'Starting processing and reading loops...'
try:
if searchtype:
if searchtype == 'readonly':
pread = mp.Process(target=readloop, args=(d,eproc,emove))
pread.start()
pproc = mp.Process(target=readtriggerloop, args=(d, eproc,emove))
pproc.start()
# trigger events to allow moving data to working area
# This initial set makes it so the read loop bypasses the emove event the first time through.
emove.set()
# wait for threads to end (when read iteration runs out of data)
pread.join()
pproc.join()
else:
# start processes
pread = mp.Process(target=readloop, args=(d,eproc,emove))
pread.start()
pproc = mp.Process(target=processloop, args=(d,eproc,emove))
pproc.start()
# trigger events to allow moving data to working area
# This initial set makes it so the read loop bypasses the emove event the first time through.
emove.set()
# wait for threads to end (when read iteration runs out of data)
pread.join()
pproc.join()
else:
print 'Not starting read and process threads...'
except KeyboardInterrupt:
print 'Ctrl-C received. Shutting down threads...'
pread.terminate()
pproc.terminate()
pread.join()
pproc.join()
return d.copy()
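# Usage sketch (hypothetical MS name and parameter values; requires the CASA
# ms/qa tools to be available, since readprep/readiter use them):
#   d = pipe_thread('obs_scan0.ms', nints=400, nskip=0, iterint=200,
#                   dmarr=[0., 50., 100.], dtarr=[1, 2], sigma_image=6.5,
#                   secondaryfilter='fullim', savecands=1, nthreads=4)
# pipe_thread returns a copy of the shared state dict once the read and process
# loops have finished.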
| apache-2.0 |
djgagne/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
dsm054/pandas | pandas/tests/frame/test_arithmetic.py | 1 | 24964 | # -*- coding: utf-8 -*-
from collections import deque
from datetime import datetime
import operator
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
# Specifically _not_ flex-comparisons
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame({col: x[col] == y[col]
for col in x.columns},
index=x.index, columns=x.columns)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame({col: x[col] != y[col]
for col in x.columns},
index=x.index, columns=x.columns)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = pd.date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)),
columns=['a', 'b'])
df2 = pd.DataFrame({'a': pd.date_range('20010101', periods=len(df)),
'b': pd.date_range('20100101', periods=len(df))})
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame({'dates1': pd.date_range('20010101', periods=10),
'dates2': pd.date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ['eq', 'ne']:
expected = left_f(df, pd.Timestamp('20010109'))
result = right_f(pd.Timestamp('20010109'), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp('20010109'))
with pytest.raises(TypeError):
right_f(pd.Timestamp('20010109'), df)
# nats
expected = left_f(df, pd.Timestamp('nat'))
result = right_f(pd.Timestamp('nat'), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([['1989-08-01', 1], ['1989-08-01', 2]])
other = pd.DataFrame([['a', 'b'], ['c', 'd']])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False],
[True, False],
[False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons(object):
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({'a': arr})
df2 = pd.DataFrame({'a': arr2})
rs = df.gt(df2)
assert not rs.values.any()
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({'a': arr3})
rs = df3.gt(2j)
assert not rs.values.any()
# corner, dtype=object
df1 = pd.DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = pd.DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = pd.DataFrame({'col': [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range('2016-01-01', periods=10)
tdi = pd.timedelta_range('1', periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi,
1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(self, all_arithmetic_operators, float_frame,
mixed_float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith('__r'):
return getattr(operator, op.replace('__r', '__'))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize('op', ['__add__', '__sub__', '__mul__'])
def test_arith_flex_frame_mixed(self, op, int_frame, mixed_int_frame,
mixed_float_frame):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ['__sub__']:
dtype = dict(B='uint64', C=None)
elif op in ['__add__', '__mul__']:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators,
float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match='fill_value'):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match='fill_value'):
float_frame.add(float_frame.iloc[0], axis='index', fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([])
df_len0 = pd.DataFrame([], columns=['A', 'B'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
with pytest.raises(NotImplementedError, match='fill_value'):
df.add(ser_len0, fill_value='E')
with pytest.raises(NotImplementedError, match='fill_value'):
df_len0.sub(df['A'], axis=None, fill_value=3)
class TestFrameArithmetic(object):
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame([[2, 4],
[4, 6],
[6, 8]],
columns=df.columns, index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame([[1, 2],
[5, 6],
[9, 10]],
columns=df.columns, index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self,
all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [getattr(df.loc['A'], opname)(rowlike.squeeze()),
getattr(df.loc['B'], opname)(rowlike.squeeze()),
getattr(df.loc['C'], opname)(rowlike.squeeze())]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
if opname in ['__rmod__', '__rfloordiv__']:
# exvals will have dtypes [f8, i8, i8] so expected will be
# all-f8, but the DataFrame operation will return mixed dtypes
# use exvals[-1].dtype instead of "i8" for compat with 32-bit
# systems/pythons
expected[False] = expected[False].astype(exvals[-1].dtype)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self,
all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze())}
dtype = None
if opname in ['__rmod__', '__rfloordiv__']:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index,
dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == 'i').all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == 'i').all()
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
def test_arith_mixed(self):
left = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = pd.DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize('values', [[1, 2], (1, 2), np.array([1, 2]),
range(1, 3), deque([1, 2])])
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({'A': [1, 1], 'B': [1, 1]})
expected = pd.DataFrame({'A': [2, 2], 'B': [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(np.arange(1, 10, dtype='f8').reshape(3, 3),
columns=['one', 'two', 'three'],
index=['a', 'b', 'c'])
val1 = df.xs('a').values
added = pd.DataFrame(df.values + val1,
index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = pd.DataFrame(df.values + val2,
index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3,
index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
| bsd-3-clause |
abhishekkrthakur/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
dssg/wikienergy | disaggregator/build/pandas/doc/sphinxext/numpydoc/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directoly where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
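# Accepted plot_formats entries (illustrative): a bare string such as 'png'
# picks up the default DPI above, a ('suffix', dpi) pair is used as-is, and
# anything else raises PlotError below.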
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
| mit |
UNR-AERIAL/scikit-learn | sklearn/utils/testing.py | 84 | 24860 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note: using this (in both variants) will clear all warnings
from all Python modules loaded. If you need to test
cross-module warning logging, this is not the tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Name of the estimator
func : callable
Callable object to raise error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
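# Typical test usage (illustrative): install_mldata_mock({'somedata': data_dict})
# before exercising fetch_mldata, then uninstall_mldata_mock() to restore the
# real urlopen.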
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/examples/svm/plot_svm_regression.py | 5 | 1430 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF
kernels.
"""
print __doc__
###############################################################################
# Generate sample data
import numpy as np
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
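# Every fifth target (8 of the 40 samples) gets uniform noise drawn from
# roughly (-1.5, 1.5] added to it.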
###############################################################################
# Fit regression model
from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
import pylab as pl
pl.scatter(X, y, c='k', label='data')
pl.hold('on')
pl.plot(X, y_rbf, c='g', label='RBF model')
pl.plot(X, y_lin, c='r', label='Linear model')
pl.plot(X, y_poly, c='b', label='Polynomial model')
pl.xlabel('data')
pl.ylabel('target')
pl.title('Support Vector Regression')
pl.legend()
pl.show()
| unlicense |
y3ah/Sentiment_Categorization | source/decision_tree_classifier.py | 1 | 2065 | # -*- coding: utf-8 -*-
##
##decision_tree_classifier.py
import codecs
import numpy
#from scipy.sparse import csr_matrix
from sklearn import cross_validation
from sklearn.tree import DecisionTreeClassifier
from feature_extraction import *
from feature_selection import *
import time
# Read the training corpus in from file
#in_file_name = '../data/train_text.txt'
in_file_name = '../data/train_text2.txt'  # tags removed, A_B tokens handled
#in_file_name = '../data/train_text3.txt'  # alternative stopword list
bin_vectorizer, term_occurence = feature_extraction(in_file_name)
def train(inputs, feature_name, tree_depth=5):
print 'training with %s feature...'%(feature_name)
clf = DecisionTreeClassifier(max_depth=tree_depth, criterion='entropy')
start_time = time.time()
f = cross_validation.cross_val_score(clf,\
inputs,class_label,scoring='f1',cv=10)
end_time = time.time()
print 'time: %f, result: mean:%f std:%f'%(end_time - start_time, \
f.mean(), f.std())
return clf, f
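# f holds the 10 cross-validated F1 scores (cv=10, scoring='f1'); the mean and
# standard deviation printed above summarise them.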
#clf, f = train(term_occurence.toarray(), 'term_occurence')
term_occurence_new = select_features(term_occurence.toarray(), class_label, 10.0)
clf, f = train(term_occurence_new, 'term_occurence_new')
#train(tf.toarray(), 'tf')
#train(tfidf.toarray(), 'tfidf')
'''
# result on Mac, 5 layers
In [20]: run decision_tree_classifier.py
training with term_occurence feature...
time: 192.313983, result: mean:0.604118 std:0.087560
training with tf feature...
time: 178.886782, result: mean:0.555732 std:0.095158
training with tfidf feature...
time: 176.856054, result: mean:0.508084 std:0.080090
# result on Win, 5 layers
run time: 2.482095 s
f1: 0.604198, f1_std: 0.084368
run time: 3.129976 s
f1: 0.556025, f1_std: 0.094461
run time: 3.141502 s
f1: 0.506478, f1_std: 0.080185
# result on Mac, 15 layers
training with term_occurence feature...
time: 386.084352, result: mean:0.682572 std:0.054867
# result on Win, 15 layers
run time: 10.412181 s
f1: 0.681385, f1_std: 0.059790
run time: 12.176176 s
f1: 0.653785, f1_std: 0.065988
run time: 11.532466 s
f1: 0.657862, f1_std: 0.064292
'''
| mit |
lbishal/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
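# k1-k4 mirror the four components described in the module docstring:
# long-term trend, decaying seasonal term, medium-term irregularities,
# and correlated + white noise (initial values taken from the GPML book).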
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
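# y_std is the pointwise standard deviation of the predictive distribution;
# the shaded band below spans y_pred +/- 1 standard deviation.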
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
bigdataelephants/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
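# ratio > 1 means scikit-learn's ward is slower than scipy's for that
# (n_samples, n_features) pair; the contour at level 1 marks parity.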
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
JonWel/CoolProp | dev/pseudo-pure/fit_pseudo-pure_eos.py | 5 | 24174 | import numpy as np
from CoolProp.CoolProp import Props
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy.optimize
import scipy.stats
import random
import h5py
from templates import *
indices = []
class TermLibrary():
"""
Build a term library using the coefficients from Wagner and Pruss (IAPWS95)
"""
def __init__(self):
L,D,T = [],[],[]
for i in range(1,6):
for j in range(-4,9):
T.append(float(j)/8.0)
D.append(float(i))
L.append(float(0))
for i in range(1,16):
for j in range(1,16):
T.append(float(j))
D.append(float(i))
L.append(float(1))
for i in range(1,13):
for j in range(1,11):
T.append(float(j))
D.append(float(i))
L.append(float(2))
for i in range(1,6):
for j in range(10,24):
T.append(float(j))
D.append(float(i))
L.append(float(3))
for i in range(1,10):
for j in range(10,21):
T.append(float(j))
D.append(float(i)*2)
L.append(float(4))
self.T = T
self.D = D
self.L = L
from Helmholtz import helmholtz
def rsquared(x, y):
""" Return R^2 where x and y are array-like."""
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)
return r_value**2
def get_fluid_constants(Ref):
if Ref == 'R407F':
RefString = 'REFPROP-MIX:R32[0.47319469]&R125[0.2051091]&R134a[0.32169621]'
elif Ref == 'R410A':
RefString = 'REFPROP-MIX:R32[0.6976147]&R125[0.3023853]'
LIBRARY = TermLibrary()
# Coefficients for HFC blends
LIBRARY.N = np.array([9.87252E-01, -1.03017E+00, 1.17666E+00, 6.10984E+00, -7.79453E+00, 1.83377E-02, 1.05880E+00, -1.12018E+00, 6.29064E-01, 6.24982E+00, -8.07855E+00, 2.64843E-02, -2.53639E+00, 8.50922E-01, -5.20084E-01, -4.64225E-02, -1.75725E+00, 1.38469E+00, -9.22473E-01, -5.03562E-02, 6.79757E-01, -6.52431E-01, 2.33779E-01, -2.91790E-01, -1.38991E-01, 2.62270E-01, -3.51688E-03, -3.51953E-01, 2.86215E-01, -5.07076E-03, -1.96680E+00, 6.21190E-01, -1.95505E-01, -1.12009E+00, 2.77353E-02, 8.22098E-01, -2.77727E-01, -7.58262E-02, -8.15653E-02, 2.00760E-02, -1.39761E-02, 6.89437E-02, -4.42552E-03, 7.55927E-02, -8.30480E-01, 3.36159E-01, 8.98881E-01, -1.17591E+00, 3.58172E-01, -2.21041E-02, -2.33323E-02, -5.07525E-02, -5.42007E-02, 1.16181E-02, 1.09552E-02, -3.76062E-02, -1.26426E-02, 5.53849E-02, -7.10970E-02, 3.10441E-02, 1.32798E-02, 1.54776E-02, -3.14579E-02, 3.52952E-02, 1.59566E-02, -1.85110E-02, -1.01254E-02, 3.02373E-03, 4.55978E-03, 1.72477E-01, -2.61116E-01, -7.45473E-02, 8.18591E-02, -7.94097E-02, -1.04047E-05, 1.71939E-02, 1.61382E-02, 9.15953E-03, 1.70451E-02, 1.05992E-03, 1.16124E-03, -4.82049E-03, -3.61575E-03, -6.36579E-03, -6.07010E-03, -8.75332E-04])
LIBRARY.T = np.array([0.44, 1.2, 2.97, 0.67, 0.91, 5.96, 0.241, 0.69, 2.58, 0.692, 0.943, 5.8, 1.93, 1.7, 3.3, 7, 2.15, 2, 3, 7, 2.1, 4.3, 3.3, 4.7, 2.95, 0.7, 6, 1.15, 0.77, 5.84, 1.78, 2.05, 4.3, 2.43, 5.3, 2.2, 4.3, 12, 12, 13, 16, 13, 16.2, 13, 3, 2.7, 0.76, 1.48, 2.7, 6, 6, 17, 17, 0.3, 0.24, 1.8, 1.2, 0.25, 7, 8.7, 11.6, 0.45, 8.4, 8.5, 11.5, 25, 26, 0.2, 0.248, 0.2, 0.74, 3, 0.24, 2.86, 8, 17, 16, 16, 16.2, 0.7, 0.69, 7.4, 8.7, 1.25, 1.23, 4.7])
LIBRARY.D = np.array([1.0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 9])
LIBRARY.L = np.array([0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 0, 0, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 0, 0, 3, 3, 1, 1, 2])
global indices
indices = set()
while len(indices) < 23:
indices.add(random.randint(0, len(LIBRARY.T)-1))
print indices, len(LIBRARY.T)
T0 = np.array([LIBRARY.T[i] for i in indices])
D0 = np.array([LIBRARY.D[i] for i in indices])
L0 = np.array([LIBRARY.L[i] for i in indices])
N0 = np.array([LIBRARY.N[i] for i in indices])
# Values from Span short(2003) (polar)
# D0 = np.array([0, 1.0, 1.0, 1.0, 3.0, 7.0, 1.0, 2.0, 5.0, 1.0, 1.0, 4.0, 2.0])
# L0 = np.array([0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0])
# T0 = np.array([0, 0.25, 1.25, 1.5, 0.25, 0.875, 2.375, 2.0, 2.125, 3.5, 6.5, 4.75, 12.5])
# N0 = 0.5*np.ones_like(D0)
# values from R410A
N0 = np.array([0.0, 0.987252, -1.03017, 1.17666, -0.138991, 0.00302373, -2.53639, -1.96680, -0.830480, 0.172477, -0.261116, -0.0745473, 0.679757, -0.652431, 0.0553849, -0.0710970, -0.000875332, 0.0200760, -0.0139761, -0.0185110, 0.0171939, -0.00482049])
T0 = np.array([0.0,0.44,1.2,2.97,2.95,0.2,1.93,1.78,3.0,0.2,0.74,3.0,2.1,4.3,0.25,7.0,4.7,13.0,16.0,25.0,17.0,7.4])
D0 = np.array([0,1.0,1,1,2,5,1,2,3,5,5,5,1,1,4,4,9,2,2,4,5,6])
L0 = np.array([0,0.0,0,0,0,0,1,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3])
indices = set()
while len(indices) < 5:
indices.add(random.randint(0, len(LIBRARY.T)-1))
print indices, len(LIBRARY.T)
T0 = np.append(T0, [LIBRARY.T[i] for i in indices])
D0 = np.append(D0, [LIBRARY.D[i] for i in indices])
L0 = np.append(L0, [LIBRARY.L[i] for i in indices])
N0 = np.append(N0, [LIBRARY.N[i] for i in indices])
# values from R407C
# N0 = np.array([0.0, 1.0588,-1.12018, 0.629064,-0.351953, 0.00455978,-1.75725,-1.12009, 0.0277353, 0.898881,-1.17591, 0.0818591,-0.0794097,-0.0000104047, 0.233779,-0.291790, 0.0154776,-0.0314579,-0.00442552,-0.0101254, 0.00915953,-0.003615])
# T0 = np.array([0.0,0.241,0.69,2.58,1.15,0.248,2.15,2.43,5.3,0.76,1.48,0.24,2.86,8.0,3.3,4.7,0.45,8.4,16.2,26.0,16.0,8.7])
# D0 = np.array([0.0,1,1,1,2,5,1,2,2,3,3,5,5,5,1,1,4,4,2,4,5,6])
# L0 = np.array([0.0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3])
return RefString, N0, T0, D0, L0
class IdealPartFitter(object):
def __init__(self, Ref):
self.Ref = Ref
self.RefString, N0, T0, D0, L0 = get_fluid_constants(Ref)
self.molemass = Props(self.RefString,'molemass')
self.Tc = Props(self.RefString, 'Tcrit')
self.rhoc = Props(self.RefString, 'rhocrit')
self.pc = Props(self.RefString, 'pcrit')
self.T = np.linspace(100, 450, 200)
self.tau = self.Tc/self.T
self.C = Props('C', 'T', self.T, 'D', 1e-15, self.RefString)
R = 8.314472/self.molemass
self.cp0_R = self.C/R
def cp0_R_from_fit(self, a_e):
a = a_e[0:len(a_e)//2]
e = a_e[len(a_e)//2::]
u1 = e[1]/self.T
u2 = e[2]/self.T
u3 = e[3]/self.T
return a[0]*self.T**e[0]+a[1]*u1**2*np.exp(u1)/(np.exp(u1)-1)**2+a[2]*u2**2*np.exp(u2)/(np.exp(u2)-1)**2+a[3]*u3**2*np.exp(u3)/(np.exp(u3)-1)**2
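    # The fitted form above is a power term plus three Planck-Einstein terms:
    #   cp0/R = a0*T**e0 + sum_i a_i * u_i**2 * exp(u_i) / (exp(u_i) - 1)**2,
    # with u_i = e_i/T, so a_e packs the four a's followed by the four e's.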
def OBJECTIVE_cp0_R(self, a_e):
cp0_R_fit = self.cp0_R_from_fit(a_e)
RMS = np.sqrt(np.mean(np.power((self.cp0_R-cp0_R_fit)/self.cp0_R, 2)))
return RMS
def fit(self):
a_e = [2.8749, 2.0623, 5.9751, 1.5612, 0.1, 697.0, 1723.0, 3875.0]
a_e = scipy.optimize.minimize(self.OBJECTIVE_cp0_R, a_e).x
self.a = a_e[0:len(a_e)//2]
self.e = a_e[len(a_e)//2::]
cp0_over_R_check = 1-self.tau**2*self.d2phi0_dTau2(self.tau)
plt.plot(self.T, (self.cp0_R_from_fit(a_e)/self.cp0_R-1)*100, '-', self.T, (cp0_over_R_check/self.cp0_R-1)*100, '^')
plt.xlabel('Temperature [K]')
plt.ylabel('($c_{p0}/R$ (fit) / $c_{p0}/R$ (REFPROP) -1)*100 [%]')
plt.savefig('cp0.pdf')
plt.close()
def d2phi0_dTau2(self, tau):
d = []
for _tau in tau:
#lead term is killed
d.append(helmholtz.phi0_logtau(-1.0).dTau2(_tau, _tau)
+ helmholtz.phi0_cp0_poly(self.a[0],self.e[0],self.Tc,298.15).dTau2(_tau, _tau)
+ helmholtz.phi0_Planck_Einstein(self.a,self.e/self.Tc,1,len(self.a)-1).dTau2(_tau, _tau)
)
return np.array(d)
class ResidualPartFitter(object):
def __init__(self, Ref, IPF):
self.Ref = Ref
self.IPF = IPF
self.RefString, self.N0, self.T0, self.D0, self.L0 = get_fluid_constants(Ref)
self.Tc = Props(self.RefString,'Tcrit')
self.rhoc = Props(self.RefString,'rhocrit')
molemass = Props(self.RefString,'molemass')
self.R = 8.314472/ molemass
def termwise_Rsquared(self):
keepers = []
values = []
print len(self.N0), 'terms at start'
for i in range(len(self.N0)):
n = helmholtz.vectord([float(1)])
d = helmholtz.vectord([self.D0[i]])
t = helmholtz.vectord([self.T0[i]])
l = helmholtz.vectord([self.L0[i]])
self.phir = helmholtz.phir_power(n, d, t, l, 0, 0)
PPF = self.evaluate_EOS(np.array(list(n)))
R2 = rsquared(PPF.p,self.phir.dDeltaV(self.tauV,self.deltaV))
values.append((R2,i))
if R2 > 0.9:
keepers.append(i)
values,indices = zip(*reversed(sorted(values)))
keepers = list(indices[0:30])
self.N0 = self.N0[keepers]
self.T0 = self.T0[keepers]
self.D0 = self.D0[keepers]
self.L0 = self.L0[keepers]
print len(self.N0), 'terms at end'
def generate_1phase_data(self):
Tc = Props(self.RefString, 'Tcrit')
rhoc = Props(self.RefString, 'rhocrit')
TTT, RHO, PPP, CPP, CVV, AAA = [], [], [], [], [], []
for _T in np.linspace(220, 450, 100):
print _T
for _rho in np.logspace(np.log10(1e-2), np.log10(rhoc), 100):
try:
if _T > Tc:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
DL = Props('D', 'T', _T, 'Q', 0, self.RefString)
DV = Props('D', 'T', _T, 'Q', 1, self.RefString)
if _rho < DV or _rho > DL:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
p = None
if p is not None:
TTT.append(_T)
RHO.append(_rho)
PPP.append(p)
CPP.append(cp)
CVV.append(cv)
AAA.append(a)
except ValueError as VE:
print VE
pass
for _rho in np.linspace(rhoc, 3.36*rhoc, 50):
try:
if _T > Tc:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
DL = Props('D', 'T', _T, 'Q', 0, self.RefString)
DV = Props('D', 'T', _T, 'Q', 1, self.RefString)
if _rho < DV or _rho > DL:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
p = None
if p is not None:
TTT.append(_T)
RHO.append(_rho)
PPP.append(p)
CPP.append(cp)
CVV.append(cv)
AAA.append(a)
except ValueError as VE:
print VE
pass
h = h5py.File('T_rho_p.h5','w')
grp = h.create_group(self.Ref)
grp.create_dataset("T",data = np.array(TTT),compression = "gzip")
grp.create_dataset("rho", data = np.array(RHO),compression = "gzip")
grp.create_dataset("p", data = np.array(PPP),compression = "gzip")
grp.create_dataset("cp", data = np.array(CPP),compression = "gzip")
grp.create_dataset("cv", data = np.array(CVV),compression = "gzip")
grp.create_dataset("speed_sound", data = np.array(AAA),compression = "gzip")
h.close()
def load_data(self):
h = h5py.File('T_rho_p.h5','r')
self.T = h.get(self.Ref + '/T').value
self.rho = h.get(self.Ref + '/rho').value
self.p = h.get(self.Ref + '/p').value
self.cp = h.get(self.Ref + '/cp').value
self.cv = h.get(self.Ref + '/cv').value
self.speed_sound = h.get(self.Ref + '/speed_sound').value
self.tau = self.Tc/self.T
self.delta = self.rho/self.rhoc
self.tauV = helmholtz.vectord(self.tau)
self.deltaV = helmholtz.vectord(self.delta)
# Get the derivative d2phi0_dTau2 from the ideal part fitter
self.d2phi0_dTau2 = self.IPF.d2phi0_dTau2(self.tau)
def evaluate_EOS(self, N):
self.phir.n = helmholtz.vectord(N)
dDelta = self.phir.dDeltaV(self.tauV, self.deltaV)
dTau2 = self.phir.dTau2V(self.tauV, self.deltaV)
dDelta2 = self.phir.dDelta2V(self.tauV, self.deltaV)
dDelta_dTau = self.phir.dDelta_dTauV(self.tauV, self.deltaV)
# Evaluate the pressure
p = (self.rho*self.R*self.T)*(1 + self.delta*dDelta)
# Evaluate the specific heat at constant volume
cv_over_R = -self.tau**2*(self.d2phi0_dTau2 + dTau2)
cv = cv_over_R*self.R
# Evaluate the specific heat at constant pressure
cp_over_R = cv_over_R+(1.0+self.delta*dDelta-self.delta*self.tau*dDelta_dTau)**2/(1+2*self.delta*dDelta+self.delta**2*dDelta2)
cp = cp_over_R*self.R
# Evaluate the speed of sound
w = np.sqrt(1000*self.R*self.T*cp_over_R/cv_over_R*(1+2*self.delta*dDelta+self.delta**2*dDelta2))
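        # Assuming molemass is returned in kg/kmol, R is in kJ/(kg*K); the
        # factor of 1000 converts R*T to J/kg so the speed of sound w is in m/s.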
class stub: pass
PPF = stub()
PPF.p = np.array(p, ndmin = 1).T
PPF.cp = np.array(cp, ndmin = 1).T
PPF.cv = np.array(cv, ndmin = 1).T
PPF.w = np.array(w, ndmin = 1).T
return PPF
def OBJECTIVE(self, N):
PPF = self.evaluate_EOS(N)
## plt.plot(PPF.p, self.p); plt.show()
## plt.plot(PPF.cp, self.cp); plt.show()
## plt.plot(PPF.cv, self.cv); plt.show()
## plt.plot(PPF.w, self.speed_sound); plt.show()
w_p = 1.0
w_cv = 1.0
w_w = 1.0
w_cp = 1.0
w_total = (w_p+w_cv+w_w+w_cp)/4
w_p_norm = w_p/w_total
w_cv_norm = w_cv/w_total
w_cp_norm = w_cp/w_total
w_w_norm = w_w/w_total
residuals = np.r_[(PPF.p/self.p-1),(PPF.cv/self.cv-1),(PPF.cp/self.cp-1)]#,(PPF.w**2/self.speed_sound**2-1)]
RMS = np.sqrt(np.mean(np.power(residuals, 2)))
print 'RMS:',RMS*100, '% Max',np.max(np.abs(residuals))*100,'%'
self.RMS = RMS
self.MaxError = np.max(np.abs(residuals))
return RMS
def fit(self):
# Kill off some not as good terms
#self.termwise_Rsquared()
# Load up the residual Helmholtz term with parameters
n = helmholtz.vectord(self.N0)
d = helmholtz.vectord(self.D0)
t = helmholtz.vectord(self.T0)
l = helmholtz.vectord(self.L0)
self.phir = helmholtz.phir_power(n, d, t, l, 1, len(self.N0)-1)
# Solve for the coefficients
Nbounds = [(-10,10) for _ in range(len(self.N0))]
tbounds = [(-1,30) for _ in range(len(self.T0))]
print self.OBJECTIVE(np.array(list(self.N0)))
#self.N = self.N0
#self.N = scipy.optimize.minimize(self.OBJECTIVE, np.array(list(self.N0)), bounds = Nbounds, options = dict(maxiter = 5)).x
self.N = scipy.optimize.minimize(self.OBJECTIVE, np.array(list(self.N0)), method = 'L-BFGS-B', bounds = Nbounds, options = dict(maxiter = 100)).x
# Write the coefficients to HDF5 file
h = h5py.File('fit_coeffs.h5','w')
grp = h.create_group(self.Ref)
grp.create_dataset("n", data = np.array(self.N), compression = "gzip")
print self.N
#grp.create_dataset("t", data = np.array(self.N[len(self.N)//2::]), compression = "gzip")
h.close()
def evaluate_REFPROP(self, Ref, T, rho):
p,cp,cv,w = [],[],[],[]
R = 8.314472/Props(Ref,'molemass')
for _T,_rho in zip(T, rho):
p.append(Props("P",'T',_T,'D',_rho,Ref))
cp.append(Props("C",'T',_T,'D',_rho,Ref))
cv.append(Props("O",'T',_T,'D',_rho,Ref))
w.append(Props("A",'T',_T,'D',_rho,Ref))
class stub: pass
PPF = stub()
PPF.p = np.array(p, ndmin = 1).T
PPF.cp = np.array(cp, ndmin = 1).T
PPF.cv = np.array(cv, ndmin = 1).T
PPF.w = np.array(w, ndmin = 1).T
return PPF
def check(self):
# Load the coefficients from file
h = h5py.File('fit_coeffs.h5','r')
grp = h.get(self.Ref)
n = grp.get('n').value
h.close()
print n
import matplotlib.colors as colors
cNorm = colors.LogNorm(vmin=1e-3, vmax=50)
PPF = self.evaluate_EOS(np.array(list(n)))
self.OBJECTIVE(np.array(list(n)))
print 'max error (p)',np.max(np.abs(PPF.p/self.p-1)*100),'%'
SC1 = plt.scatter(self.rho, self.T, s = 8, c = np.abs(PPF.p/self.p-1)*100, edgecolors = 'none', cmap = plt.get_cmap('jet'), norm = cNorm)
plt.gca().set_xscale('log')
cb = plt.colorbar()
cb.set_label('abs(PPF.p/self.p-1)*100')
plt.savefig('pressure.png')
plt.show()
print 'max error (cp)',np.max(np.abs(PPF.cp/self.cp-1)*100),'%'
SC1 = plt.scatter(self.rho, self.T, s = 8, c = np.abs(PPF.cp/self.cp-1)*100, edgecolors = 'none', cmap = plt.get_cmap('jet'), norm = cNorm)
plt.gca().set_xscale('log')
cb = plt.colorbar()
cb.set_label('abs(PPF.cp/self.cp-1)*100')
plt.savefig('cp.png')
plt.show()
## plt.plot(self.T,PPF.p/self.p,'.'); plt.show()
## plt.plot(self.T,PPF.cp/self.cp,'.'); plt.show()
## plt.plot(self.T,PPF.cv/self.cv,'.'); plt.show()
## plt.plot(self.T,PPF.w/self.speed_sound,'.'); plt.show()
class PPFFitterClass(object):
def __init__(self, Ref, regenerate_data = True, fit = True):
self.Ref = Ref
self.IPF = IdealPartFitter(Ref)
self.IPF.fit()
for i in range(1):
self.RPF = ResidualPartFitter(Ref, IPF = self.IPF)
if regenerate_data:
self.RPF.generate_1phase_data()
self.RPF.load_data()
if fit:
self.RPF.fit()
f = open('results.txt','a+')
print >> f, indices, self.RPF.RMS, self.RPF.MaxError
f.close()
self.RPF.check()
quit()
self.output_files()
def contour_plot(values):
"""
Parameters
----------
values : iterable, same size as T and rho
"""
plt.semilogx(self.RPF.rho,self.RPF.T,'o')
plt.show()
# Generate a regular grid to interpolate the data.
xi = np.linspace(min(self.RPF.T), max(self.RPF.T), 100)
yi = np.linspace(min(self.RPF.rho), max(self.RPF.rho), 100)
xi, yi = np.meshgrid(xi, yi)
# Interpolate using delaunay triangularization
zi = mlab.griddata(np.array(self.RPF.T),np.array(self.RPF.rho),np.array(values),xi,yi)
cont = plt.contourf(yi,xi,zi,30)
plt.colorbar()
plt.show()
def output_files(self):
h = h5py.File('fit_coeffs.h5','r')
n = h.get(self.Ref+'/n').value
#t = h.get(self.Ref+'/t').value
# Output the header file
header = PPF_h_template.format(Ref = self.Ref, RefUpper = self.Ref.upper())
acoeffs = '0, '+', '.join(['{a:0.6f}'.format(a=_) for _ in self.IPF.a])
# First one doesn't get divided by critical temperature, later ones do
bcoeffs = '0, '
bcoeffs += str(self.IPF.e[0])+', '
bcoeffs += ', '.join(['{b:0.4f}/{Tcrit:g}'.format(b=_,Tcrit = self.IPF.Tc) for _ in self.IPF.e[1::]])
ncoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in n])
tcoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in self.RPF.T0])
dcoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in self.RPF.D0])
lcoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in self.RPF.L0])
import sys
sys.path.append('..')
from fit_ancillary_ODRPACK import saturation_pressure, saturation_density
pL = saturation_pressure(self.IPF.RefString, self.IPF.Ref, LV = 'L')
pV = saturation_pressure(self.IPF.RefString, self.IPF.Ref, LV = 'V')
rhoL = saturation_density(self.IPF.RefString, self.IPF.Ref, form='A', LV='L', add_critical = False)
rhoV = saturation_density(self.IPF.RefString, self.IPF.Ref, form='B', LV='V', add_critical = False)
code = PPF_cpp_template.format(Ref = self.Ref,
RefUpper = self.Ref.upper(),
acoeffs = acoeffs,
bcoeffs = bcoeffs,
Ncoeffs = ncoeffs,
tcoeffs = tcoeffs,
dcoeffs = dcoeffs,
Lcoeffs = lcoeffs,
N_phir = len(n),
N_cp0 = len(self.IPF.a),
molemass = self.IPF.molemass,
Ttriple = 200,
accentric = 0.7,
pcrit = self.IPF.pc,
Tcrit = self.IPF.Tc,
rhocrit = self.IPF.rhoc,
pL = pL,
pV = pV,
rhoL = rhoL,
rhoV = rhoV
)
f = open(self.IPF.Ref+'.h','w')
f.write(header)
f.close()
f = open(self.IPF.Ref+'.cpp','w')
f.write(code)
f.close()
if __name__=='__main__':
Ref = 'R407F'
PPFFitterClass(Ref)
| mit |
tedunderwood/biographies | code/make_diff_matrix.py | 1 | 8947 | # Make diff matrix.
# USAGE:
# python3 make_diff_matrix.py name_of_outfile pathtodata1 pathtodata2 etc
# This will create ../data/name_of_outfile.csv and ..data/name_of_outfile.slopes.csv
# for instance, I did this:
# python3 make_diff_matrix.py 8-20-2017_diffmatrix ../natalie/out_files/all_post23bio_out.tsv ../natalie/out_files/all_pre23bio_new.tsv
VOCABLENGTH = 6000
import csv, sys, os
import numpy as np
import pandas as pd
import math
# from random import shuffle
# from random import random as randomprob
from sklearn.linear_model import LinearRegression
from collections import Counter
csv.field_size_limit(sys.maxsize)
arguments = sys.argv
out_filename = arguments[1]
if os.path.isfile(out_filename + '.csv'):
print(out_filename + ' already exists, and I refuse to overwrite it.')
files_to_use = []
for f in arguments[2:]:
if os.path.isfile(f):
files_to_use.append(f)
else:
print("Cannot find " + f)
# Let's load some metadata about the publication dates of these works,
# and the inferred genders of their authors.
personalnames = set()
with open("../lexicons/PersonalNames.txt", encoding="utf-8") as f:
names = f.readlines()
for line in names:
name = line.rstrip()
personalnames.add(name)
personalnames.add('said-' + name)
vocab = Counter()
def add2vocab(vocab, filepath):
with open(filepath, encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
gender = row['chargender']
if gender.startswith('u'):
continue
words = row['words'].split()
for w in words:
if not w.startswith('said-') and w not in personalnames:
vocab[w] += 1
# Let's create the vocabulary.
for f in files_to_use:
add2vocab(vocab, f)
vocabcount = len(vocab)
print("The data includes " + str(vocabcount) + " words")
wordsinorder = [x[0] for x in vocab.most_common(VOCABLENGTH)]
vocabulary = dict()
vocabset = set()
for idx, word in enumerate(wordsinorder):
vocabulary[word] = idx
vocabset.add(word)
print("Vocabulary sorted, top " + str(VOCABLENGTH) + " kept.")
vecbyyear = dict()
vecbyyear['m'] = dict()
vecbyyear['f'] = dict()
datevector = list(range(1780, 2008))
for g in ['f', 'm']:
for i in range(1780, 2008):
vecbyyear[g][i] = np.zeros((VOCABLENGTH))
def add2counts(vecbyyear, path):
with open(path, encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
gender = row['chargender']
if gender.startswith('u'):
continue
date = int(row['pubdate'])
if date < 1780 or date > 2008:
continue
words = row['words'].split()
for w in words:
if w in vocabset:
idx = vocabulary[w]
np.add.at(vecbyyear[gender][date], idx, 1)
# Let's actually count words.
for f in files_to_use:
add2counts(vecbyyear, f)
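# The scorer below is a signed variant of Dunning's log-likelihood: for each
# word it builds the 2x2 table (word / not-word) x (corpus A / corpus B) and
# computes G = sum(observed * ln(observed / expected)). The conventional
# G-squared statistic carries an extra factor of 2, which would only rescale
# these scores.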
def dunnings(vectora, vectorb):
assert len(vectora) == len(vectorb)
veclen = len(vectora)
totala = np.sum(vectora)
totalb = np.sum(vectorb)
totalboth = totala + totalb
dunningvector = np.zeros(veclen)
for i in range(veclen):
if vectora[i] == 0 or vectorb[i] == 0:
continue
# Cause you know you're going to get div0 errors.
try:
probI = (vectora[i] + vectorb[i]) / totalboth
probnotI = 1 - probI
expectedIA = totala * probI
expectedIB = totalb * probI
expectedNotIA = totala * probnotI
expectedNotIB = totalb * probnotI
expected_table = np.array([[expectedIA, expectedNotIA],
[expectedIB, expectedNotIB]])
actual_table = np.array([[vectora[i], (totala - vectora[i])],
[vectorb[i], (totalb - vectorb[i])]])
G = np.sum(actual_table * np.log(actual_table / expected_table))
# We're going to use a signed version of Dunnings, so features where
# B is higher than expected will be negative.
if expectedIB > vectorb[i]:
G = -G
dunningvector[i] = G
except:
pass
# There are a million ways to get a div-by-zero or log-zero error
# in that calculation. I could check them all, or just do this.
# The vector was initialized with zeroes, which are the default
# value I want for failed calculations anyhow.
return dunningvector
def pure_rank_matrix(femalevectorsbyyear, malevectorsbyyear, datevector):
rankmatrix = []
magnitudematrix = []
for i in datevector:
d = dunnings(femalevectorsbyyear[i], malevectorsbyyear[i])
# transform this into a nonparametric ranking
decorated = [x for x in zip(d, [x for x in range(len(d))])]
decorated.sort()
negativeidx = -sum(d < 0)
positiveidx = 1
numzeros = sum(d == 0)
ranking = np.zeros(len(d))
for dvalue, index in decorated:
# to understand what follows, it's crucial to remember that
# we're iterating through decorated in dvalue order
if dvalue < 0:
ranking[index] = negativeidx
negativeidx += 1
elif dvalue > 0:
ranking[index] = positiveidx
positiveidx += 1
else:
# dvalue is zero
pass
checkzeros = sum(ranking == 0)
if numzeros != checkzeros:
print('error in number of zeros')
rawmagnitude = femalevectorsbyyear[i] + malevectorsbyyear[i]
normalizedmagnitude = rawmagnitude / np.sum(rawmagnitude)
assert len(ranking) == len(normalizedmagnitude)
rank_adjusted_by_magnitude = ranking * normalizedmagnitude
rankmatrix.append(ranking)
magnitudematrix.append(rank_adjusted_by_magnitude)
return np.array(magnitudematrix), np.array(rankmatrix)
def diff_proportion(vecbyyear, datevector):
diffmatrix = []
for yr in datevector:
if np.sum(vecbyyear['m'][yr]) == 0:
mvec = np.full(len(vecbyyear['m'][yr]), 0)
else:
mvec = (vecbyyear['m'][yr] * 5000) / np.sum(vecbyyear['m'][yr])
if np.sum(vecbyyear['f'][yr]) == 0:
fvec = np.full(len(vecbyyear['f'][yr]), 0)
else:
fvec = (vecbyyear['f'][yr] * 5000) / np.sum(vecbyyear['f'][yr])
dvec = fvec - mvec
diffmatrix.append(dvec)
return np.array(diffmatrix)
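# Each row of diffmatrix is one year and each column one vocabulary word; the
# value is the word's rate per 5000 words among female characters minus the
# same rate among male characters for that year.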
diffmatrix = diff_proportion(vecbyyear, datevector)
def writematrix(amatrix, outpath):
global wordsinorder, datevector
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['thedate'] + wordsinorder)
for i, date in enumerate(datevector):
writer.writerow(np.insert(amatrix[i, : ], 0, date))
writematrix(diffmatrix, '../data/' + out_filename + '.csv')
print('Linear regression to infer slopes.')
datevector = np.array(datevector)
outrows = []
for i in range(VOCABLENGTH):
thiscolumn = diffmatrix[ : , [i]]
# note: the brackets around i extract it as a *column* rather than row
notmissing = thiscolumn != 0
# still a column
y = thiscolumn[notmissing].transpose()
# that's a cheap hack to create an array w/ more than one column,
# which the linear regression seems to want
x = datevector[notmissing.transpose()[0]]
# We have to transpose the column "notmissing" to index a row.
x = x[ : , np.newaxis]
# Then we have to make x a row of an array with two
# dimensions (even though it only has one row).
vectorlen = len(x)
word = wordsinorder[i]
model = LinearRegression()
model.fit(x, y)
slope = model.coef_[0]
intercept = model.intercept_
standard_deviation = np.std(y)
nineteenth = np.mean(thiscolumn[0:120])
    twentieth = np.mean(thiscolumn[120:])
change = twentieth - nineteenth
approachmid = abs(np.nanmean(thiscolumn[0:60])) - abs(np.nanmean(thiscolumn[150:210]))
approachstd = approachmid / standard_deviation
# note that it's important we use thiscolumn rather than y here, because y has been reduced
# in length
out = dict()
out['word'] = word
out['slope'] = slope
out['mean'] = np.mean(thiscolumn)
out['intercept'] = intercept
out['change'] = change
out['approachmid'] = approachmid
out['approachstd'] = approachstd
outrows.append(out)
with open('../data/' + out_filename + '.slopes.csv', mode = 'w', encoding = 'utf-8') as f:
writer = csv.DictWriter(f, fieldnames = ['word', 'slope', 'mean', 'intercept', 'change', 'approachmid', 'approachstd'])
writer.writeheader()
for row in outrows:
writer.writerow(row)
| mit |
aravart/poolmate | poolmate/test/dummy.py | 2 | 1137 | import sys
import numpy as np
from sklearn.datasets import make_classification
from sklearn.metrics import zero_one_loss
from sklearn.neighbors import KNeighborsClassifier
def inline(inputfile, outputfile):
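    # Trains a 1-nearest-neighbor classifier on the teaching set read from
    # inputfile (label in column 0, features after) and writes its 0-1 loss
    # on a held-out synthetic evaluation pool to outputfile.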
# data = np.loadtxt(sys.stdin)
data = np.loadtxt(inputfile, delimiter=',')
if np.ndim(data) == 1:
data = np.array([data])
train_x = data[:, 1:]
train_y = data[:, 0]
candidate_size = 1000
evaluation_size = 1000
x, y = make_classification(n_samples=candidate_size + evaluation_size,
n_features=2,
n_informative=1,
n_redundant=1,
n_clusters_per_class=1,
random_state=37)
eval_x = x[candidate_size:]
eval_y = y[candidate_size:]
learner = KNeighborsClassifier(n_neighbors=1)
learner = learner.fit(train_x, train_y)
pred_y = learner.predict(eval_x)
with open(outputfile, 'w') as f:
l = zero_one_loss(eval_y, pred_y)
f.write(str(l))
if __name__ == "__main__":
inline(sys.argv[1], sys.argv[2])
| mit |
Traecp/MCA_GUI | build/lib.linux-x86_64-2.7/McaGUI_v15.py | 2 | 74488 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.ndimage
from scipy import stats
from scipy.fftpack import fft, fftfreq, fftshift
import os, sys
import gc
from os import listdir
from os.path import isfile,join
import gtk
import matplotlib as mpl
import matplotlib.pyplot as plt
#mpl.use('GtkAgg')
from matplotlib.figure import Figure
#from matplotlib.axes import Subplot
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
from matplotlib.cm import jet#, gist_rainbow # colormap
from matplotlib.widgets import Cursor
#from matplotlib.patches import Rectangle
from matplotlib import path
#import matplotlib.patches as patches
from matplotlib.ticker import MaxNLocator
import xrayutilities as xu
from lmfit import Parameters, minimize
import h5py as h5
#from Vantec_GUI.spec_complete_MCA import *
from MCA_GUI import mca_spec as SP
__version__ = "1.1.5"
__date__ = "16/10/2014"
__author__ = "Thanh-Tra NGUYEN"
__email__ = "[email protected]"
#mpl.rcParams['font.size'] = 18.0
#mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.handletextpad'] = 0.5
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.subplot.bottom'] = 0.13
mpl.rcParams['figure.subplot.top'] = 0.93
mpl.rcParams['figure.subplot.left'] = 0.14
mpl.rcParams['figure.subplot.right'] = 0.915
mpl.rcParams['savefig.dpi'] = 300
def Fourier(X,vect):
N = vect.size #number of data points
T = X[1] - X[0] #sample spacing
TF = fft(vect)
xf = fftfreq(N,T)
xf = fftshift(xf)
yplot = fftshift(TF)
yplot = np.abs(yplot)
yplot = yplot[N/2:]
xf = xf[N/2:]
return xf, yplot/yplot.max()
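# Minimal usage sketch: for a uniformly sampled profile (X, vect),
#   xf, amp = Fourier(X, vect)
# returns the non-negative frequency axis and the FFT magnitude normalized to
# its maximum; a peak at frequency f corresponds to a fringe period of 1/f in
# the units of X.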
def flat_data(data,dynlow, dynhigh, log):
""" Returns data where maximum superior than 10^dynhigh will be replaced by 10^dynhigh, inferior than 10^dynlow will be replaced by 10^dynlow"""
if log:
mi = 10**dynlow
ma = 10**dynhigh
data=np.minimum(np.maximum(data,mi),ma)
data=np.log10(data)
else:
mi = dynlow
ma = dynhigh
data=np.minimum(np.maximum(data,mi),ma)
return data
def psdVoigt(parameters,x):
"""Define pseudovoigt function"""
y0 = parameters['y0'].value
xc = parameters['xc'].value
A = parameters['A'].value
w = parameters['w'].value
mu = parameters['mu'].value
y = y0 + A * ( mu * (2/np.pi) * (w / (4*(x-xc)**2 + w**2)) + (1 - mu) * (np.sqrt(4*np.log(2)) / (np.sqrt(np.pi) * w)) * np.exp(-(4*np.log(2)/w**2)*(x-xc)**2) )
return y
def objective(pars,y,x):
#we will minimize this function
err = y - psdVoigt(pars,x)
return err
def init(data_x,data_y,xc,arbitrary=False):
""" param = [y0, xc, A, w, mu]
    xc should be the position the user clicks on the image to trace the profiles."""
param = Parameters()
#idA=np.where(data_x - xc < 1e-4)[0]
if arbitrary:
A = data_y.max()
else:
idA=np.where(data_x==xc)[0][0]
A = data_y[idA]
y0 = 1.0
w = 0.5
mu = 0.5
param.add('y0', value=y0)
param.add('xc', value=xc)
param.add('A', value=A)
param.add('w', value=w)
param.add('mu', value=mu, min=0., max=1.)
return param
def fit(data_x,data_y,xc, arbitrary=False):
""" return: fitted data y, fitted parameters """
param_init = init(data_x,data_y,xc,arbitrary)
if data_x[0] > data_x[-1]:
data_x = data_x[::-1]
result = minimize(objective, param_init, args=(data_y,data_x))
x = np.linspace(data_x.min(),data_x.max(),data_x.shape[0])
y = psdVoigt(param_init,x)
return param_init, y
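# Minimal usage sketch (hypothetical arrays data_x, data_y and a peak guess xc):
#   params, y_fit = fit(data_x, data_y, xc)
# With the lmfit API targeted here, minimize() refines the Parameters object in
# place, so params holds the fitted values (e.g. params['xc'].value) and y_fit
# is the fitted pseudo-Voigt sampled on an even grid spanning data_x.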
class PopUpFringes(object):
def __init__(self, xdata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
xi = np.arange(len(self.xdata))
slope, intercept, r_value, p_value, std_err = stats.linregress(self.xdata,xi)
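        # Linear fit of fringe index against fringe position: |slope| is the
        # number of fringes per unit of xdata, i.e. roughly 1/(mean fringe spacing).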
fitline = slope*self.xdata+intercept
self.ax.plot(self.xdata, fitline, 'r-',self.xdata,xi, 'bo')
self.ax.axis([self.xdata.min(),self.xdata.max(),xi.min()-1, xi.max()+1])
self.ax.text(0.3, 0.9,'Slope = %.4f +- %.4f' % (slope, std_err),
horizontalalignment='center',
verticalalignment='center',
transform = self.ax.transAxes,
color='red')
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
class PopUpImage(object):
def __init__(self, xdata, ydata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
self.ydata = ydata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.canvas.mpl_connect("button_press_event",self.on_press)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
self.ax.plot(self.xdata, self.ydata, 'b-', lw=2)
self.textes = []
self.plots = []
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
def on_press(self, event):
if event.inaxes == self.ax and event.button==3:
self.clear_notes()
xc = event.xdata
#***** Find the closest x value *****
residuel = self.xdata - xc
residuel = np.abs(residuel)
j = np.argmin(residuel)
#y = self.ydata[i-1:i+1]
#yc= y.max()
#j = np.where(self.ydata == yc)
#j = j[0][0]
xc= self.xdata[j]
x_fit = self.xdata[j-3:j+3]
y_fit = self.ydata[j-3:j+3]
fitted_param, fitted_data = fit(x_fit, y_fit, xc, True)
x_fit = np.linspace(x_fit.min(), x_fit.max(), 200)
y_fit = psdVoigt(fitted_param, x_fit)
period = fitted_param['xc'].value
std_err= fitted_param['xc'].stderr
p = self.ax.plot(x_fit, y_fit,'r-')
p2 = self.ax.axvline(period,color='green',lw=2)
txt=self.ax.text(0.05, 0.9, 'Period = %.4f +- %.4f (nm)'%(period, std_err), transform = self.ax.transAxes, color='red')
self.textes.append(txt)
self.plots.append(p[0])
self.plots.append(p2)
elif event.inaxes == self.ax and event.button==2:
dif = np.diff(self.ydata)
dif = dif/dif.max()
p3=self.ax.plot(dif,'r-')
self.plots.append(p3[0])
self.canvas.draw()
def clear_notes(self):
if len(self.textes)>0:
for t in self.textes:
t.remove()
if len(self.plots)>0:
for p in self.plots:
p.remove()
self.textes = []
self.plots = []
class MyMainWindow(gtk.Window):
def __init__(self):
super(MyMainWindow, self).__init__()
self.set_title("MCA Reciprocal space map processing. Version %s - last update on: %s"%(__version__,__date__))
self.set_size_request(1200,900)
self.set_position(gtk.WIN_POS_CENTER)
self.set_border_width(10)
self.toolbar = gtk.Toolbar()
self.toolbar.set_style(gtk.TOOLBAR_ICONS)
self.refreshtb = gtk.ToolButton(gtk.STOCK_REFRESH)
self.opentb = gtk.ToolButton(gtk.STOCK_OPEN)
self.sep = gtk.SeparatorToolItem()
self.aspecttb = gtk.ToolButton(gtk.STOCK_PAGE_SETUP)
self.quittb = gtk.ToolButton(gtk.STOCK_QUIT)
self.toolbar.insert(self.opentb, 0)
self.toolbar.insert(self.refreshtb, 1)
self.toolbar.insert(self.aspecttb, 2)
self.toolbar.insert(self.sep, 3)
self.toolbar.insert(self.quittb, 4)
self.tooltips = gtk.Tooltips()
self.tooltips.set_tip(self.refreshtb,"Reload data files")
self.tooltips.set_tip(self.opentb,"Open a folder containing HDF5 (*.h5) data files")
self.tooltips.set_tip(self.aspecttb,"Change the graph's aspect ratio")
self.tooltips.set_tip(self.quittb,"Quit the program")
self.opentb.connect("clicked", self.choose_folder)
self.refreshtb.connect("clicked",self.folder_update)
self.aspecttb.connect("clicked",self.change_aspect_ratio)
self.quittb.connect("clicked", gtk.main_quit)
self.graph_aspect = False #Flag to change the aspect ratio of the graph, False = Auto, True = equal
############################# BOXES ###############################################
vbox = gtk.VBox()
vbox.pack_start(self.toolbar,False,False,0)
hbox=gtk.HBox()
######################### TREE VIEW #############################################
self.sw = gtk.ScrolledWindow()
self.sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
hbox.pack_start(self.sw, False, False, 0)
self.store=[]
self.list_store = gtk.ListStore(str)
self.treeView = gtk.TreeView(self.list_store)
self.treeView.connect("row-activated",self.on_changed_rsm)
rendererText = gtk.CellRendererText()
self.TVcolumn = gtk.TreeViewColumn("RSM data files", rendererText, text=0)
self.TVcolumn.set_sort_column_id(0)
self.treeView.append_column(self.TVcolumn)
self.sw.add(self.treeView)
self.GUI_current_folder = self.DATA_current_folder = os.getcwd()
#******************************************************************
# Notebooks
#******************************************************************
self.notebook = gtk.Notebook()
self.page_GUI = gtk.HBox()
self.page_conversion = gtk.VBox()
self.page_XRDML = gtk.VBox()
######################################FIGURES####################33
#self.page_single_figure = gtk.HBox()
self.midle_panel = gtk.VBox()
self.rsm = ""
self.rsm_choosen = ""
self.my_notes = []
self.lines = []
self.points=[]
self.polygons=[]
self.fig=Figure(dpi=100)
## Draw line for arbitrary profiles
self.arb_lines_X = []
self.arb_lines_Y = []
self.arb_line_points = 0
#self.ax = self.fig.add_subplot(111)
self.ax = self.fig.add_axes([0.1,0.2,0.7,0.7])
self.fig.subplots_adjust(left=0.1,bottom=0.20, top=0.90)
self.vmin = 0
self.vmax = 1000
self.vmax_range = self.vmax
self.canvas = FigureCanvas(self.fig)
Fig_hbox = gtk.HBox()
self.Export_HQ_Image_btn = gtk.Button("Save HQ image")
self.Export_HQ_Image_btn.connect("clicked", self.Export_HQ_Image)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#Global color bar
self.cax = self.fig.add_axes([0.85, 0.20, 0.03, 0.70])#left,bottom,width,height
#self.canvas.mpl_connect("motion_notify_event",self.on_motion)
self.canvas.mpl_connect("button_press_event",self.on_press)
#self.canvas.mpl_connect("button_release_event",self.on_release)
        self.mouse_moved = False #If click without move: do not zoom the image
Fig_hbox.pack_start(self.Export_HQ_Image_btn, False, False, 0)
Fig_hbox.pack_start(self.main_figure_navBar, True,True, 0)
self.midle_panel.pack_start(Fig_hbox, False,False, 0)
self.midle_panel.pack_start(self.canvas, True,True, 2)
self.page_GUI.pack_start(self.midle_panel, True,True, 0)
#hbox.pack_start(self.midle_panel, True,True, 0)
########################################## RIGHT PANEL ###################
self.right_panel = gtk.VBox(False,0)
self.linear_scale_btn = gtk.ToggleButton("Linear scale")
self.linear_scale_btn.set_usize(30,0)
self.linear_scale_btn.connect("toggled",self.log_update)
self.log_scale=0
#self.wavelength_txt = gtk.Label("Energy (eV)")
##self.wavelength_txt.set_alignment(1,0.5)
#self.wavelength_field = gtk.Entry()
#self.wavelength_field.set_text("8333")
#self.wavelength_field.set_usize(30,0)
#self.lattice_const_txt = gtk.Label("Lattice constant (nm)")
#self.lattice_const_txt.set_alignment(1,0.5)
#self.lattice_const = gtk.Entry()
#self.lattice_const.set_text("0.5431")
#self.lattice_const.set_usize(30,0)
self.int_range_txt = gtk.Label("Integration range")
self.int_range_txt.set_alignment(1,0.5)
self.int_range = gtk.Entry()
self.int_range.set_text("0.05")
self.int_range.set_usize(30,0)
self.fitting_range_txt = gtk.Label("Fitting range")
self.fitting_range_txt.set_alignment(1,0.5)
self.fitting_range = gtk.Entry()
self.fitting_range.set_text("0.1")
self.fitting_range.set_usize(30,0)
# ********** Set the default values for configuration *************
self.plotXYprofiles_btn = gtk.RadioButton(None,"Plot X,Y profiles")
self.plotXYprofiles_btn.set_active(False)
self.arbitrary_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"Arbitrary profiles")
self.rectangle_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"ROI projection")
self.option_table = gtk.Table(4,3,False)#Pack the options
self.option_table.attach(self.linear_scale_btn, 0,1,0,1)
self.option_table.attach(self.plotXYprofiles_btn,0,1,1,2)
self.option_table.attach(self.arbitrary_profiles_btn,0,1,2,3)
self.option_table.attach(self.rectangle_profiles_btn,0,1,3,4)
# self.option_table.attach(self.wavelength_txt,1,2,0,1)
# self.option_table.attach(self.wavelength_field,2,3,0,1)
# self.option_table.attach(self.lattice_const_txt,1,2,1,2)
# self.option_table.attach(self.lattice_const, 2,3,1,2)
self.option_table.attach(self.int_range_txt, 1,2,0,1)
self.option_table.attach(self.int_range, 2,3,0,1)
self.option_table.attach(self.fitting_range_txt, 1,2,1,2)
self.option_table.attach(self.fitting_range, 2,3,1,2)
### Options for profile plots
self.profiles_log_btn = gtk.ToggleButton("Y-Log")
self.profiles_log_btn.connect("toggled",self.profiles_update)
self.profiles_export_data_btn = gtk.Button("Export data")
self.profiles_export_data_btn.connect("clicked",self.profiles_export)
self.profiles_option_box = gtk.HBox(False,0)
self.profiles_option_box.pack_start(self.profiles_log_btn, False, False, 0)
self.profiles_option_box.pack_start(self.profiles_export_data_btn, False, False, 0)
### Figure of profiles plot
self.profiles_fringes = []
self.fig_profiles = Figure()
self.profiles_ax1 = self.fig_profiles.add_subplot(211)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2 = self.fig_profiles.add_subplot(212)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas = FigureCanvas(self.fig_profiles)
self.profiles_canvas.set_size_request(450,50)
self.profiles_canvas.mpl_connect("button_press_event",self.profile_press)
self.profiles_navBar = NavigationToolbar(self.profiles_canvas, self)
self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
#### Results of fitted curves
self.fit_results_table = gtk.Table(7,3, False)
title = gtk.Label("Fitted results:")
self.chi_title = gtk.Label("Qz profile")
self.tth_title = gtk.Label("Qx profile")
y0 = gtk.Label("y0:")
xc = gtk.Label("xc:")
A = gtk.Label("A:")
w = gtk.Label("FWHM:")
mu = gtk.Label("mu:")
y0.set_alignment(0,0.5)
xc.set_alignment(0,0.5)
A.set_alignment(0,0.5)
w.set_alignment(0,0.5)
mu.set_alignment(0,0.5)
self.Qz_fitted_y0 = gtk.Label()
self.Qz_fitted_xc = gtk.Label()
self.Qz_fitted_A = gtk.Label()
self.Qz_fitted_w = gtk.Label()
self.Qz_fitted_mu = gtk.Label()
self.Qx_fitted_y0 = gtk.Label()
self.Qx_fitted_xc = gtk.Label()
self.Qx_fitted_A = gtk.Label()
self.Qx_fitted_w = gtk.Label()
self.Qx_fitted_mu = gtk.Label()
self.fit_results_table.attach(title,0,3,0,1)
self.fit_results_table.attach(self.chi_title,1,2,1,2)
self.fit_results_table.attach(self.tth_title,2,3,1,2)
self.fit_results_table.attach(y0,0,1,2,3)
self.fit_results_table.attach(xc,0,1,3,4)
self.fit_results_table.attach(A,0,1,4,5)
self.fit_results_table.attach(w,0,1,5,6)
self.fit_results_table.attach(mu,0,1,6,7)
self.fit_results_table.attach(self.Qz_fitted_y0,1,2,2,3)
self.fit_results_table.attach(self.Qz_fitted_xc,1,2,3,4)
self.fit_results_table.attach(self.Qz_fitted_A,1,2,4,5)
self.fit_results_table.attach(self.Qz_fitted_w,1,2,5,6)
self.fit_results_table.attach(self.Qz_fitted_mu,1,2,6,7)
self.fit_results_table.attach(self.Qx_fitted_y0,2,3,2,3)
self.fit_results_table.attach(self.Qx_fitted_xc,2,3,3,4)
self.fit_results_table.attach(self.Qx_fitted_A,2,3,4,5)
self.fit_results_table.attach(self.Qx_fitted_w,2,3,5,6)
self.fit_results_table.attach(self.Qx_fitted_mu,2,3,6,7)
#### PACK the right panel
self.right_panel.pack_start(self.option_table, False, False, 0)
self.right_panel.pack_start(self.profiles_option_box,False,False,0)
self.right_panel.pack_start(self.profiles_navBar,False,False,0)
self.right_panel.pack_start(self.profiles_canvas,True,True,0)
self.right_panel.pack_start(self.fit_results_table, False, False, 0)
self.page_GUI.pack_end(self.right_panel,False, False,5)
#********************************************************************
# Conversion data SPEC to HDF page
#********************************************************************
self.conv_box = gtk.VBox()
self.box1 = gtk.HBox()
self.det_frame = gtk.Frame()
self.det_frame.set_label("Detector Vantec")
self.det_frame.set_label_align(0.5,0.5)
self.exp_frame = gtk.Frame()
self.exp_frame.set_label("Experiment parameters")
self.exp_frame.set_label_align(0.5,0.5)
self.conv_frame = gtk.Frame()
self.conv_frame.set_label("Data conversion: SPEC-HDF5")
self.conv_frame.set_label_align(0.5,0.5)
#self.conv_frame.set_alignment(0.5,0.5)
#********************************************************************
# Detector parameters
#********************************************************************
self.det_table = gtk.Table(6,2,False)
self.t1 = gtk.Label("Detector size (mm)")
self.t2 = gtk.Label("Number of channels")
self.t3 = gtk.Label("Center channel")
self.t4 = gtk.Label("Channels/Degree")
self.t5 = gtk.Label("ROI (from-to)")
self.t6 = gtk.Label("Orientation")
self.t1.set_alignment(0,0.5)
self.t2.set_alignment(0,0.5)
self.t3.set_alignment(0,0.5)
self.t4.set_alignment(0,0.5)
self.t5.set_alignment(0,0.5)
self.t6.set_alignment(0,0.5)
self.t1_entry = gtk.Entry()
self.t1_entry.set_text("50")
self.t2_entry = gtk.Entry()
self.t2_entry.set_text("2048")
self.t3_entry = gtk.Entry()
self.t3_entry.set_text("819.87")
self.t4_entry = gtk.Entry()
self.t4_entry.set_text("211.012")
self.small_box = gtk.HBox()
self.t5_label = gtk.Label("-")
self.t5_entry1 = gtk.Entry()
self.t5_entry1.set_text("40")
self.t5_entry2 = gtk.Entry()
self.t5_entry2.set_text("1300")
self.small_box.pack_start(self.t5_entry1,True, True,0)
self.small_box.pack_start(self.t5_label,True, True,0)
self.small_box.pack_start(self.t5_entry2,True, True,0)
self.t6_entry = gtk.combo_box_new_text()
self.t6_entry.append_text("Up (zero on the bottom)")
self.t6_entry.append_text("Down (zero on the top)")
self.t6_entry.set_active(1)
self.det_table.attach(self.t1, 0,1,0,1)
self.det_table.attach(self.t2, 0,1,1,2)
self.det_table.attach(self.t3, 0,1,2,3)
self.det_table.attach(self.t4, 0,1,3,4)
self.det_table.attach(self.t5, 0,1,4,5)
self.det_table.attach(self.t6, 0,1,5,6)
self.det_table.attach(self.t1_entry, 1,2,0,1)
self.det_table.attach(self.t2_entry, 1,2,1,2)
self.det_table.attach(self.t3_entry, 1,2,2,3)
self.det_table.attach(self.t4_entry, 1,2,3,4)
self.det_table.attach(self.small_box, 1,2,4,5)
self.det_table.attach(self.t6_entry, 1,2,5,6)
self.det_table_align = gtk.Alignment()
self.det_table_align.set_padding(15,10,10,10)
self.det_table_align.set(0.5, 0.5, 1.0, 1.0)
self.det_table_align.add(self.det_table)
self.det_frame.add(self.det_table_align)
#********************************************************************
# Experiment parameters
#********************************************************************
self.exp_table = gtk.Table(6,2,False)
self.e1 = gtk.Label("Substrate material:")
self.e1_other = gtk.Label("If other:")
self.e2 = gtk.Label("Energy (eV)")
self.e3 = gtk.Label("Attenuation coefficient file")
self.e4 = gtk.Label("Foil colunm name (in SPEC file)")
self.e5 = gtk.Label("Monitor colunm name (in SPEC file)")
self.e6 = gtk.Label("Reference monitor (for normalization)")
self.e1.set_alignment(0,0.5)
self.e1_other.set_alignment(1,0.5)
self.e2.set_alignment(0,0.5)
self.e3.set_alignment(0,0.5)
self.e4.set_alignment(0,0.5)
self.e5.set_alignment(0,0.5)
self.e6.set_alignment(0,0.5)
#self.e1_entry = gtk.Label("Si for now")
self.e1_entry = gtk.combo_box_new_text()
self.e1_entry.append_text("-- other")
self.e1_entry.append_text("Si")
self.e1_entry.append_text("Ge")
self.e1_entry.append_text("GaAs")
self.e1_entry.append_text("GaP")
self.e1_entry.append_text("GaSb")
self.e1_entry.append_text("InAs")
self.e1_entry.append_text("InP")
self.e1_entry.append_text("InSb")
self.e1_entry.set_active(1)
self.e1_entry_other = gtk.Entry()
self.e1_entry_other.set_text("")
self.e2_entry = gtk.Entry()
self.e2_entry.set_text("8333")
self.e3_box = gtk.HBox()
self.e3_path =gtk.Entry()
self.e3_browse = gtk.Button("Browse")
self.e3_browse.connect("clicked", self.select_file, self.e3_path, "A")
self.e3_box.pack_start(self.e3_path, False, False, 0)
self.e3_box.pack_start(self.e3_browse, False, False, 0)
self.e4_entry = gtk.Entry()
self.e4_entry.set_text("pfoil")
self.e5_entry = gtk.Entry()
self.e5_entry.set_text("vct3")
self.e6_entry = gtk.Entry()
self.e6_entry.set_text("1e6")
substrate_box1 = gtk.HBox()
substrate_box2 = gtk.HBox()
substrate_box1.pack_start(self.e1, False, False, 0)
substrate_box1.pack_start(self.e1_entry, False, False, 0)
substrate_box2.pack_start(self.e1_other, False, False, 0)
substrate_box2.pack_start(self.e1_entry_other, False, False, 0)
self.exp_table.attach(substrate_box1, 0,1,0,1)
self.exp_table.attach(self.e2, 0,1,1,2)
self.exp_table.attach(self.e3, 0,1,2,3)
self.exp_table.attach(self.e4, 0,1,3,4)
self.exp_table.attach(self.e5, 0,1,4,5)
self.exp_table.attach(self.e6, 0,1,5,6)
self.exp_table.attach(substrate_box2, 1,2,0,1)
self.exp_table.attach(self.e2_entry, 1,2,1,2)
self.exp_table.attach(self.e3_box, 1,2,2,3)
self.exp_table.attach(self.e4_entry, 1,2,3,4)
self.exp_table.attach(self.e5_entry, 1,2,4,5)
self.exp_table.attach(self.e6_entry, 1,2,5,6)
self.exp_table_align = gtk.Alignment()
self.exp_table_align.set_padding(15,10,10,10)
self.exp_table_align.set(0.5, 0.5, 1.0, 1.0)
self.exp_table_align.add(self.exp_table)
self.exp_frame.add(self.exp_table_align)
#********************************************************************
# Data conversion information
#********************************************************************
self.conv_table = gtk.Table(6,3,False)
self.c1 = gtk.Label("Spec file")
self.c2 = gtk.Label("MCA file")
self.c3 = gtk.Label("Destination folder")
self.c4 = gtk.Label("Scan number (from-to)")
self.c5 = gtk.Label("Description for each RSM (optional-separate by comma)")
self.c6 = gtk.Label("Problem of foil delay (foil[n]-->data[n+1])")
self.c1.set_alignment(0,0.5)
self.c2.set_alignment(0,0.5)
self.c3.set_alignment(0,0.5)
self.c4.set_alignment(0,0.5)
self.c5.set_alignment(0,0.5)
self.c6.set_alignment(0,0.5)
self.c1_entry1 = gtk.Entry()
self.c2_entry1 = gtk.Entry()
self.c3_entry1 = gtk.Entry()
self.c4_entry1 = gtk.Entry()
self.c5_entry1 = gtk.Entry()
self.c5_entry1.set_text("")
self.c6_entry = gtk.CheckButton()
self.c1_entry2 = gtk.Button("Browse SPEC")
self.c2_entry2 = gtk.Button("Browse MCA")
self.c3_entry2 = gtk.Button("Browse Folder")
self.c4_entry2 = gtk.Entry()
self.c1_entry2.connect("clicked", self.select_file, self.c1_entry1, "S")
self.c2_entry2.connect("clicked", self.select_file, self.c2_entry1, "M")
self.c3_entry2.connect("clicked", self.select_folder, self.c3_entry1, "D")
self.conv_table.attach(self.c1, 0,1,0,1)
self.conv_table.attach(self.c2, 0,1,1,2)
self.conv_table.attach(self.c3, 0,1,2,3)
self.conv_table.attach(self.c4, 0,1,3,4)
self.conv_table.attach(self.c5, 0,1,4,5)
self.conv_table.attach(self.c6, 0,1,5,6)
self.conv_table.attach(self.c1_entry1, 1,2,0,1)
self.conv_table.attach(self.c2_entry1, 1,2,1,2)
self.conv_table.attach(self.c3_entry1, 1,2,2,3)
self.conv_table.attach(self.c4_entry1, 1,2,3,4)
self.conv_table.attach(self.c5_entry1, 1,3,4,5)
self.conv_table.attach(self.c6_entry, 1,2,5,6)
self.conv_table.attach(self.c1_entry2, 2,3,0,1)
self.conv_table.attach(self.c2_entry2, 2,3,1,2)
self.conv_table.attach(self.c3_entry2, 2,3,2,3)
self.conv_table.attach(self.c4_entry2, 2,3,3,4)
self.conv_table_align = gtk.Alignment()
self.conv_table_align.set_padding(15,10,10,10)
self.conv_table_align.set(0.5, 0.5, 1.0, 1.0)
self.conv_table_align.add(self.conv_table)
self.conv_frame.add(self.conv_table_align)
#********************************************************************
# The RUN button
#********************************************************************
self.run_conversion = gtk.Button("Execute")
self.run_conversion.connect("clicked", self.spec2HDF)
self.run_conversion.set_size_request(50,30)
self.show_info = gtk.Label()
#********************************************************************
# Pack the frames
#********************************************************************
self.box1.pack_start(self.det_frame,padding=15)
self.box1.pack_end(self.exp_frame, padding =15)
self.conv_box.pack_start(self.box1,padding=15)
self.conv_box.pack_start(self.conv_frame,padding=5)
self.conv_box.pack_start(self.run_conversion, False,False,10)
self.conv_box.pack_start(self.show_info, False,False,10)
self.page_conversion.pack_start(self.conv_box,False, False,20)
#********************************************************************
# Conversion XRDML data to HDF
#********************************************************************
self.XRDML_conv_box = gtk.VBox()
self.Instrument_table = gtk.Table(1,4,True)
self.Inst_txt = gtk.Label("Instrument:")
self.Inst_txt.set_alignment(0,0.5)
self.Instrument = gtk.combo_box_new_text()
self.Instrument.append_text("Bruker")
self.Instrument.append_text("PANalytical")
self.Instrument.set_active(0)
self.Instrument_table.attach(self.Inst_txt,0,1,0,1)
self.Instrument_table.attach(self.Instrument, 1,2,0,1)
self.Instrument.connect("changed",self.Change_Lab_Instrument)
self.XRDML_table = gtk.Table(7,4,True)
self.XRDML_tooltip = gtk.Tooltips()
self.XRDML_substrate_txt = gtk.Label("Substrate material:")
self.XRDML_substrate_other_txt = gtk.Label("If other:")
self.XRDML_substrate_inplane_txt= gtk.Label("In-plane direction (i.e. 1 1 0)")
self.XRDML_substrate_outplane_txt= gtk.Label("Out-of-plane direction (i.e. 0 0 1)")
self.XRDML_reflection_txt = gtk.Label("Reflection (H K L) - optional:")
self.XRDML_energy_txt = gtk.Label("Energy (eV):")
self.XRDML_description_txt = gtk.Label("Description of the sample:")
self.XRDML_xrdml_file_txt = gtk.Label("Select RAW file:")
self.XRDML_destination_txt = gtk.Label("Select a destination folder:")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_txt, "Substrate material")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_other_txt, "The substrate material, e.g. Al, SiO2, CdTe, GaN, ...")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_inplane_txt, "The substrate in-plane and out-of-plane directions - used to calculate the orientation matrix.")
self.XRDML_tooltip.set_tip(self.XRDML_reflection_txt, "H K L, separated by spaces, e.g. 2 2 4 (0 0 0 for an XRR map). This is used for the offset correction.")
self.XRDML_tooltip.set_tip(self.XRDML_energy_txt, "If empty, the default Cu K_alpha_1 will be used.")
self.XRDML_tooltip.set_tip(self.XRDML_description_txt, "Description of the sample, this will be the name of the converted file. If empty, it will be named 'RSM.h5'")
self.XRDML_tooltip.set_tip(self.XRDML_xrdml_file_txt, "Select the data file recorded by the chosen equipment")
self.XRDML_tooltip.set_tip(self.XRDML_destination_txt, "Select a destination folder to store the converted file.")
self.XRDML_substrate_txt.set_alignment(0,0.5)
self.XRDML_substrate_other_txt.set_alignment(1,0.5)
self.XRDML_substrate_inplane_txt.set_alignment(0,0.5)
self.XRDML_substrate_outplane_txt.set_alignment(1,0.5)
self.XRDML_reflection_txt.set_alignment(0,0.5)
self.XRDML_energy_txt.set_alignment(0,0.5)
self.XRDML_description_txt.set_alignment(0,0.5)
self.XRDML_xrdml_file_txt.set_alignment(0,0.5)
self.XRDML_destination_txt.set_alignment(0,0.5)
self.XRDML_substrate = gtk.combo_box_new_text()
self.XRDML_substrate.append_text("-- other")
self.XRDML_substrate.append_text("Si")
self.XRDML_substrate.append_text("Ge")
self.XRDML_substrate.append_text("GaAs")
self.XRDML_substrate.append_text("GaP")
self.XRDML_substrate.append_text("GaSb")
self.XRDML_substrate.append_text("InAs")
self.XRDML_substrate.append_text("InP")
self.XRDML_substrate.append_text("InSb")
self.XRDML_substrate.set_active(0)
self.XRDML_substrate_other = gtk.Entry()
self.XRDML_substrate_other.set_text("")
self.XRDML_substrate_inplane = gtk.Entry()
self.XRDML_substrate_inplane.set_text("")
self.XRDML_substrate_outplane = gtk.Entry()
self.XRDML_substrate_outplane.set_text("")
self.XRDML_reflection = gtk.Entry()
self.XRDML_reflection.set_text("")
self.XRDML_energy = gtk.Entry()
self.XRDML_energy.set_text("")
self.XRDML_description = gtk.Entry()
self.XRDML_description.set_text("")
self.XRDML_xrdml_file_path = gtk.Entry()
self.XRDML_destination_path = gtk.Entry()
self.XRDML_xrdml_file_browse = gtk.Button("Browse RAW file")
self.XRDML_destination_browse= gtk.Button("Browse destination folder")
self.XRDML_xrdml_file_browse.connect("clicked", self.select_file, self.XRDML_xrdml_file_path, "S")
self.XRDML_destination_browse.connect("clicked", self.select_folder, self.XRDML_destination_path, "D")
self.XRDML_table.attach(self.XRDML_substrate_txt, 0,1,0,1)
self.XRDML_table.attach(self.XRDML_substrate, 1,2,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other_txt, 2,3,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other, 3,4,0,1)
self.XRDML_table.attach(self.XRDML_substrate_inplane_txt, 0,1,1,2)
self.XRDML_table.attach(self.XRDML_substrate_inplane, 1,2,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane_txt, 2,3,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane, 3,4,1,2)
self.XRDML_table.attach(self.XRDML_reflection_txt, 0,1,2,3)
self.XRDML_table.attach(self.XRDML_reflection, 1,2,2,3)
self.XRDML_table.attach(self.XRDML_energy_txt,0,1,3,4)
self.XRDML_table.attach(self.XRDML_energy, 1,2,3,4)
self.XRDML_table.attach(self.XRDML_description_txt, 0,1,4,5)
self.XRDML_table.attach(self.XRDML_description, 1,2,4,5)
self.XRDML_table.attach(self.XRDML_xrdml_file_txt, 0,1,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_path, 1,2,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_browse, 2,3,5,6)
self.XRDML_table.attach(self.XRDML_destination_txt, 0,1,6,7)
self.XRDML_table.attach(self.XRDML_destination_path, 1,2,6,7)
self.XRDML_table.attach(self.XRDML_destination_browse, 2,3,6,7)
#********************************************************************
# The RUN button
#********************************************************************
self.XRDML_run = gtk.Button("Execute")
self.XRDML_run.connect("clicked", self.Convert_Lab_Source)
self.XRDML_run.set_size_request(50,30)
self.XRDML_show_info = gtk.Label()
#********************************************************************
# Pack the XRDML options
#********************************************************************
self.XRDML_conv_box.pack_start(self.Instrument_table, False, False,5)
self.XRDML_conv_box.pack_start(self.XRDML_table, False, False, 10)
self.XRDML_conv_box.pack_start(self.XRDML_run, False, False, 5)
self.XRDML_conv_box.pack_start(self.XRDML_show_info, False,False,10)
self.page_XRDML.pack_start(self.XRDML_conv_box,False, False,20)
#********************************************************************
# Pack the notebook
#********************************************************************
self.notebook.append_page(self.page_GUI, gtk.Label("RSM GUI"))
self.notebook.append_page(self.page_conversion, gtk.Label("ESRF-MCA spec file (Vantec)"))
self.notebook.append_page(self.page_XRDML, gtk.Label("Lab instruments"))
hbox.pack_start(self.notebook)
vbox.pack_start(hbox,True,True,0)
############################### Sliders ######################################
#sld_box = gtk.Fixed()
sld_box = gtk.HBox(False,2)
self.vmin_txt = gtk.Label("Vmin")
self.vmin_txt.set_alignment(0,0.5)
#self.vmin_txt.set_justify(gtk.JUSTIFY_CENTER)
self.vmax_txt = gtk.Label("Vmax")
self.vmax_txt.set_alignment(0,0.5)
#self.vmax_txt.set_justify(gtk.JUSTIFY_CENTER)
self.sld_vmin = gtk.HScale()
self.sld_vmax = gtk.HScale()
self.sld_vmin.set_size_request(200,25)
self.sld_vmax.set_size_request(200,25)
self.sld_vmin.set_range(0,self.vmax)
self.sld_vmax.set_range(0,self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(0)
self.sld_vmin.connect('value-changed',self.scale_update)
self.sld_vmax.connect('value-changed',self.scale_update)
vmax_spin_adj = gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmax_spin_btn = gtk.SpinButton(vmax_spin_adj,1,1)
self.vmax_spin_btn.set_numeric(True)
self.vmax_spin_btn.set_wrap(True)
self.vmax_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmax_spin_btn.connect('value-changed',self.scale_update_spin)
vmin_spin_adj = gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmin_spin_btn = gtk.SpinButton(vmin_spin_adj,1,1)
self.vmin_spin_btn.set_numeric(True)
self.vmin_spin_btn.set_wrap(True)
self.vmin_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmin_spin_btn.connect('value-changed',self.scale_update_spin)
sld_box.pack_start(self.vmin_txt,False,False,0)
sld_box.pack_start(self.sld_vmin,False,False,0)
sld_box.pack_start(self.vmin_spin_btn,False,False,0)
sld_box.pack_start(self.vmax_txt,False,False,0)
sld_box.pack_start(self.sld_vmax,False,False,0)
sld_box.pack_start(self.vmax_spin_btn,False,False,0)
#sld_box.pack_start(self.slider_reset_btn,False,False,0)
vbox.pack_start(sld_box,False,False,3)
self.add(vbox)
self.connect("destroy", gtk.main_quit)
self.show_all()
#########################################################################################################################
def format_coord(self, x, y):
#***** Add intensity information into the navigation toolbar *******************************
numrows, numcols = (self.gridder.data.T).shape
col,row = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = self.gridder.data.T[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4f, y=%1.4f'%(x, y)
def pro_format_coord(self,x,y):
return 'x=%.4f, y=%.1f'%(x,y)
def init_image(self,log=False):
self.ax.cla()
self.cax.cla()
#print "Initialize image ..."
#
#self.clevels = np.linspace(self.vmin, self.vmax, 100)
if log:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T),vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T), self.clevels, vmin=self.vmin, vmax=self.vmax)
else:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T,vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T, self.clevels, vmin=self.vmin, vmax=self.vmax)
self.img.cmap.set_under(alpha=0)
self.ax.axis([self.gridder.xaxis.min(), self.gridder.xaxis.max(), self.gridder.yaxis.min(), self.gridder.yaxis.max()])
#self.ax.set_aspect('equal')
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
self.ax.yaxis.label.set_size(20)
self.ax.xaxis.label.set_size(20)
self.ax.set_title(self.rsm_description,fontsize=20)
self.ax.format_coord = self.format_coord
self.cb = self.fig.colorbar(self.img, cax = self.cax, format="%.1f")#format=fm
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Intensity)\ [arb.\ units]$',fontsize=20)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=20)
self.cb.locator = MaxNLocator(nbins=6)
#self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#print "Image is initialized."
def change_aspect_ratio(self,w):
self.graph_aspect = not (self.graph_aspect)
if self.graph_aspect == True:
self.ax.set_aspect('equal')
else:
self.ax.set_aspect('auto')
self.canvas.draw()
def on_changed_rsm(self,widget,row,col):
#print "************Change RSM*************"
gc.collect() #Clear unused variables to gain memory
#************** Reminder of the structure of these HDF5 files:
# ************* file=[scan_id={'eta'=[data], '2theta'=[data], 'intensity'=[data], 'description'='RSM 004 ...'}]
self.clear_notes()
#self.init_image()
model = widget.get_model()
self.rsm_choosen = model[row][0]
self.rsm = join(self.GUI_current_folder,self.rsm_choosen)#file path
self.rsm_info = h5.File(self.rsm,'r')#HDF5 object that collects all information of this scan
#self.ax.set_title(self.rsm_choosen,fontsize=20)
### Data Loading ##
groups = self.rsm_info.keys()
scan = groups[0]
self.scan = self.rsm_info[scan]
self.data = self.scan.get('intensity').value
self.Qx = self.scan.get('Qx').value
self.Qy = self.scan.get('Qy').value
self.Qz = self.scan.get('Qz').value
self.rsm_description = self.scan.get('description').value
self.rsm_info.close()
#print "Data are successfully loaded."
self.gridder = xu.Gridder2D(self.data.shape[0],self.data.shape[1])
#print "Gridder is calculated."
# MM = self.data.max()
# M = np.log10(MM)
# data = flat_data(self.data,0,M)
self.gridder(self.Qx, self.Qz, self.data)
self.data = self.gridder.data.T
self.vmin=self.data.min()
self.vmax=self.data.max()
#print "Starting scale_plot()"
self.scale_plot()
#self.slider_update()
def scale_plot(self):
#print "Scale_plot() is called."
data = self.data.copy()
#self.init_image()
if self.linear_scale_btn.get_active():
self.linear_scale_btn.set_label("--> Linear scale")
data = np.log10(data)
#print data.max()
self.init_image(log=True)
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
self.vmax = np.log10(actual_vmax) if self.log_scale == 0 else actual_vmax
if actual_vmin == 0:
self.vmin=0
elif actual_vmin >0:
self.vmin = np.log10(actual_vmin) if self.log_scale == 0 else actual_vmin
self.vmax_range = data.max()
self.log_scale = 1
#log=True
else:
self.linear_scale_btn.set_label("--> Log scale")
self.init_image(log=False)
#print "Calculating min max and update slider..."
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
#print "Actual vmax: ",actual_vmax
if self.log_scale == 1:
self.vmax = np.power(10.,actual_vmax)
else:
self.vmax = actual_vmax
self.vmax_range = data.max()
if actual_vmin ==0:
self.vmin = 0
elif actual_vmin>0:
if self.log_scale == 0:
self.vmin = actual_vmin
elif self.log_scale == 1:
self.vmin = np.power(10,actual_vmin)
self.log_scale = 0
#log=False
#print "Min max are calculated."
self.sld_vmax.set_range(-6,self.vmax_range)
self.sld_vmin.set_range(-6,self.vmax_range)
#self.init_image(log)
self.slider_update()
def log_update(self,widget):
self.scale_plot()
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Counts\ per\ second)\ [arb.\ units]$',fontsize=18)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=18)
#self.slider_update()
def scale_update(self,widget):
#print "Scale_update() is called."
self.vmin = self.sld_vmin.get_value()
self.vmax = self.sld_vmax.get_value()
self.vmin_spin_btn.set_value(self.vmin)
self.vmax_spin_btn.set_value(self.vmax)
self.slider_update()
def scale_update_spin(self,widget):
#print "Spin_update() is called"
self.vmin = self.vmin_spin_btn.get_value()
self.vmax = self.vmax_spin_btn.get_value()
self.slider_update()
def slider_update(self):
#print "slider_update() is called"
#self.img.set_clim(self.vmin, self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(self.vmin)
if self.linear_scale_btn.get_active():
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.1, 1.0, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.1, 1.0, 0))
else:
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 10, 100, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 10, 100, 0))
#self.vmax_spin_btn.update()
self.img.set_clim(self.vmin, self.vmax)
self.ax.relim()
self.canvas.draw()
#print "slider_update() stoped."
def choose_folder(self, w):
dialog = gtk.FileChooserDialog(title="Select a data folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.GUI_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
folder = folder.decode('utf8')
folder_basename = folder.split("/")[-1]
#print folder_basename
self.store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))]
self.GUI_current_folder = folder
#print store
if len(self.store)>0:
self.list_store.clear()
for i in self.store:
self.list_store.append([i])
self.TVcolumn.set_title(folder_basename)
else:
pass
else:
pass
dialog.destroy()
def folder_update(self, w):
folder = self.GUI_current_folder
if folder != os.getcwd():
store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))]
self.store=[]
self.list_store.clear()
for i in store:
self.list_store.append([i])
self.store.append(i)
def arbitrary_line_cut(self, x, y):
#**** num: integer - number of points to be extracted
#**** convert Q coordinates to pixel coordinates
x0, y0 = xu.analysis.line_cuts.getindex(x[0], y[0], self.gridder.xaxis, self.gridder.yaxis)
x1, y1 = xu.analysis.line_cuts.getindex(x[1], y[1], self.gridder.xaxis, self.gridder.yaxis)
num = int(np.hypot(x1-x0, y1-y0)) #number of points that will be plotted
xi, yi = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
profiles_data_X = profiles_data_Y = scipy.ndimage.map_coordinates(self.gridder.data, np.vstack((xi,yi)))
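# Note (editorial comment, not in the original code): map_coordinates samples the gridded
# intensity at `num` evenly spaced pixel positions along the segment (x0,y0)-(x1,y1);
# the same interpolated profile is reused for both the "Qx" and "Qz" panels of an
# arbitrary line cut, only the exported coordinate axes below differ.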
coor_X_export,coor_Y_export = np.linspace(x[0], x[1], num), np.linspace(y[0], y[1], num)
#coor_X_export = np.sort(coor_X_export)
#coor_Y_export = np.sort(coor_Y_export)
return coor_X_export,coor_Y_export, profiles_data_X, profiles_data_Y
def boundary_rectangles(self, x, y):
"""
IN : x[0,1], y[0,1]: positions of the line cut (arbitrary direction)
OUT: ROI rectangle: the rectangle in which the data will be taken
Bound rectangle: the limit values for Qx, Qz line cuts (min, max)
"""
x = np.asarray(x)
y = np.asarray(y)
alpha = np.arctan(abs((y[1]-y[0])/(x[1]-x[0]))) # inclined angle of the ROI w.r.t the horizontal line. Attention to the sign of alpha
#print np.degrees(alpha)
T = self.largueur_int/2.
if np.degrees(alpha)>55.0:
inc_x = 1
inc_y = 0
else:
inc_x = 0
inc_y = 1
y1 = y + T*inc_y
y2 = y - T*inc_y
x1 = x + T*inc_x
x2 = x - T*inc_x
#These positions are in reciprocal space units. The boundary order will be: 1-2-2-1
roi_rect = [[y1[0],x1[0]],[y2[0],x2[0]],[y2[1],x2[1]],[y1[1],x1[1]],[y1[0],x1[0]]]
roi_rect = path.Path(roi_rect)
#***************** Get the corresponding index of these points ***************************
i1,j1 = xu.analysis.line_cuts.getindex(x1[0], y1[0], self.gridder.xaxis, self.gridder.yaxis)
i2,j2 = xu.analysis.line_cuts.getindex(x2[0], y2[0], self.gridder.xaxis, self.gridder.yaxis)
i3,j3 = xu.analysis.line_cuts.getindex(x2[1], y2[1], self.gridder.xaxis, self.gridder.yaxis)
i4,j4 = xu.analysis.line_cuts.getindex(x1[1], y1[1], self.gridder.xaxis, self.gridder.yaxis)
roi_box = [[j1,i1],[j2,i2],[j3,i3],[j4,i4],[j1,i1]]
roi_box = path.Path(roi_box)
#******* Calculate the limit boundary rectangle
y_tmp = np.vstack((y1, y2))
x_tmp = np.vstack((x1, x2))
y_min = y_tmp.min()
y_max = y_tmp.max()
x_min = x_tmp.min()
x_max = x_tmp.max()
bound_rect = [x_min, x_max, y_min, y_max]
bound_rect = np.asarray(bound_rect)
contours = roi_rect.vertices
p=self.ax.plot(contours[:,1], contours[:,0], linewidth=1.5, color='white')
self.polygons.append(p[0])
self.canvas.draw()
return roi_box, bound_rect
def extract_roi_data(self, roi_box, bound_rect):
#***** Extraction of the ROI defined by the ROI box ******************
qx_min = bound_rect[0]
qx_max = bound_rect[1]
qz_min = bound_rect[2]
qz_max = bound_rect[3]
#***** Getting index of the boundary points in order to calculate the length of the extracted array
ixmin, izmin = xu.analysis.line_cuts.getindex(qx_min, qz_min, self.gridder.xaxis, self.gridder.yaxis)
ixmax, izmax = xu.analysis.line_cuts.getindex(qx_max, qz_max, self.gridder.xaxis, self.gridder.yaxis)
x_steps = ixmax - ixmin +1
z_steps = izmax - izmin +1
qx_coor = np.linspace(qx_min, qx_max, x_steps)
qz_coor = np.linspace(qz_min, qz_max, z_steps)
ROI = np.zeros(shape=(x_steps))
#****** Extract Qx line cuts ************************
for zi in range(izmin, izmax+1):
qx_int = self.gridder.data[ixmin:ixmax+1,zi]
#****** if the point is inside the ROI box: point = 0
inpoints = []
for i in range(ixmin,ixmax+1):
inpoint= roi_box.contains_point([zi,i])
inpoints.append(inpoint)
for b in range(len(inpoints)):
if inpoints[b]==False:
qx_int[b] = 0
ROI = np.vstack((ROI, qx_int))
ROI = np.delete(ROI, 0, 0) #Delete the first line which contains zeros
#****** Sum them up! Return Qx, Qz projection zones and Qx,Qz intensity
qx_ROI = ROI.sum(axis=0)/ROI.shape[0]
qz_ROI = ROI.sum(axis=1)/ROI.shape[1]
return qx_coor, qx_ROI, qz_coor, qz_ROI
def plot_profiles(self, x, y, cross_line=True):
if cross_line:
"""Drawing lines where I want to plot profiles"""
# ******** if this is not an arbitrary profile, x and y are not lists but just one individual point
x=x[0]
y=y[0]
hline = self.ax.axhline(y, color='k', ls='--', lw=1)
self.lines.append(hline)
vline = self.ax.axvline(x, color='k', ls='--', lw=1)
self.lines.append(vline)
"""Getting data to be plotted"""
self.coor_X_export, self.profiles_data_X = xu.analysis.line_cuts.get_qx_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, y, qrange=self.largueur_int)
self.coor_Y_export, self.profiles_data_Y = xu.analysis.line_cuts.get_qz_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, x, qrange=self.largueur_int)
xc = x
yc = y
""" Fitting information """
ix,iy = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
ix_left,iy = xu.analysis.line_cuts.getindex(x-self.fitting_width, y, self.gridder.xaxis, self.gridder.yaxis)
qx_2_fit = self.coor_X_export[ix_left:ix*2-ix_left+1]
qx_int_2_fit = self.profiles_data_X[ix_left:2*ix-ix_left+1]
X_fitted_params, X_fitted_data = fit(qx_2_fit, qx_int_2_fit,xc, cross_line)
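# Note (editorial comment, not in the original code): the fit window is symmetric about the
# clicked point; ix_left is the index of x - fitting_width, so the slice
# [ix_left : 2*ix - ix_left + 1] spans approximately x - fitting_width ... x + fitting_width.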
####################axX.plot(qx_2_fit, qx_fit_data, color='red',linewidth=2)
ix,iy_down = xu.analysis.line_cuts.getindex(x, y-self.fitting_width, self.gridder.xaxis, self.gridder.yaxis)
qz_2_fit = self.coor_Y_export[iy_down:iy*2-iy_down+1]
qz_int_2_fit = self.profiles_data_Y[iy_down:iy*2-iy_down+1]
Y_fitted_params, Y_fitted_data = fit(qz_2_fit, qz_int_2_fit,yc, cross_line)
####################axY.plot(qz_2_fit, qz_fit_data, color='red',linewidth=2)
else:
#**** extract arbitrary line cut
#**** extract one single line cut:
if not self.rectangle_profiles_btn.get_active():
self.coor_X_export, self.coor_Y_export, self.profiles_data_X, self.profiles_data_Y = self.arbitrary_line_cut(x,y)
else:
roi_box,bound_rect = self.boundary_rectangles(x,y)
self.coor_X_export, self.profiles_data_X, self.coor_Y_export, self.profiles_data_Y = self.extract_roi_data(roi_box, bound_rect)
tmpX = np.sort(self.coor_X_export)
tmpY = np.sort(self.coor_Y_export)
xc = tmpX[self.profiles_data_X.argmax()]
yc = tmpY[self.profiles_data_Y.argmax()]
""" Fitting information """
X_fitted_params, X_fitted_data = fit(self.coor_X_export, self.profiles_data_X, xc, not cross_line)
Y_fitted_params, Y_fitted_data = fit(self.coor_Y_export, self.profiles_data_Y, yc, not cross_line)
qx_2_fit = self.coor_X_export
qz_2_fit = self.coor_Y_export
""" Plotting profiles """
self.profiles_ax1.cla()
self.profiles_ax2.cla()
self.profiles_ax1.format_coord = self.pro_format_coord
self.profiles_ax2.format_coord = self.pro_format_coord
#self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
#self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
self.profiles_ax1.plot(self.coor_Y_export, self.profiles_data_Y, color='blue', lw=3)
self.profiles_ax1.plot(qz_2_fit, Y_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax2.plot(self.coor_X_export, self.profiles_data_X, color='blue', lw=3)
self.profiles_ax2.plot(qx_2_fit, X_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas.draw()
# Show the fitted results
self.Qz_fitted_y0.set_text("%.4f"%Y_fitted_params['y0'].value)
self.Qz_fitted_xc.set_text("%.4f"%Y_fitted_params['xc'].value)
self.Qz_fitted_A.set_text("%.4f"%Y_fitted_params['A'].value)
self.Qz_fitted_w.set_text("%.4f"%Y_fitted_params['w'].value)
self.Qz_fitted_mu.set_text("%.4f"%Y_fitted_params['mu'].value)
self.Qx_fitted_y0.set_text("%.4f"%X_fitted_params['y0'].value)
self.Qx_fitted_xc.set_text("%.4f"%X_fitted_params['xc'].value)
self.Qx_fitted_A.set_text("%.4f"%X_fitted_params['A'].value)
self.Qx_fitted_w.set_text("%.4f"%X_fitted_params['w'].value)
self.Qx_fitted_mu.set_text("%.4f"%X_fitted_params['mu'].value)
self.profiles_refresh()
self.canvas.draw()
def draw_pointed(self, x, y, finished=False):
#if len(self.lines)>0:
# self.clear_notes()
p=self.ax.plot(x,y,'ro')
self.points.append(p[0])
if finished:
l=self.ax.plot(self.arb_lines_X, self.arb_lines_Y, '--',linewidth=1.5, color='white')
self.lines.append(l[0])
self.canvas.draw()
def profiles_refresh(self):
""" """
if self.profiles_log_btn.get_active():
self.profiles_ax1.set_yscale('log')
self.profiles_ax2.set_yscale('log')
else:
self.profiles_ax1.set_yscale('linear')
self.profiles_ax2.set_yscale('linear')
self.profiles_canvas.draw()
#return
def profiles_update(self, widget):
self.profiles_refresh()
def profiles_export(self,widget):
""" Export X,Y profiles data in the same folder as the EDF image """
proX_fname = self.rsm.split(".")[0]+"_Qx_profile.dat"
proY_fname = self.rsm.split(".")[0]+"_Qz_profile.dat"
proX_export= np.vstack([self.coor_X_export, self.profiles_data_X])
proX_export=proX_export.T
proY_export= np.vstack([self.coor_Y_export, self.profiles_data_Y])
proY_export=proY_export.T
try:
np.savetxt(proX_fname, proX_export)
np.savetxt(proY_fname, proY_export)
self.popup_info('info','Data are successfully exported!')
except:
self.popup_info('error','ERROR! Data not exported!')
def on_press(self, event):
#******************** Plot X,Y cross profiles ***************************************************
if (event.inaxes == self.ax) and (event.button==3) and self.plotXYprofiles_btn.get_active():
x = event.xdata
y = event.ydata
xx=[]
yy=[]
xx.append(x)
yy.append(y)
self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
self.plot_profiles(xx,yy,cross_line=True)
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
#******************** Plot arbitrary profiles ***************************************************
elif (event.inaxes == self.ax) and (event.button==1) and (self.arbitrary_profiles_btn.get_active() or self.rectangle_profiles_btn.get_active()):
#self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
self.arb_line_points +=1
#print "Number of points clicked: ",self.arb_line_points
if self.arb_line_points>2:
self.clear_notes()
self.arb_line_points=1
x = event.xdata
y = event.ydata
self.arb_lines_X.append(x)
self.arb_lines_Y.append(y)
if len(self.arb_lines_X)<2:
finished=False
elif len(self.arb_lines_X)==2:
finished = True
self.draw_pointed(x,y,finished)#If finished clicking, connect the two points by a line
if finished:
self.plot_profiles(self.arb_lines_X, self.arb_lines_Y, cross_line=False)
self.arb_lines_X=[]
self.arb_lines_Y=[]
#self.canvas.draw()
#******************** Clear cross lines in the main image ****************************************
elif event.button==2:
self.clear_notes()
def profile_press(self, event):
""" Calculate thickness fringes """
if event.inaxes == self.profiles_ax1:
draw_fringes = True
ax = self.profiles_ax1
X_data = self.coor_Y_export
Y_data = self.profiles_data_Y
xlabel = r'$Q_z (nm^{-1})$'
title = "Linear regression of Qz fringes"
title_FFT = "Fast Fourier Transform of Qz profiles"
xlabel_FFT= "Period (nm)"
elif event.inaxes == self.profiles_ax2:
draw_fringes = True
ax = self.profiles_ax2
X_data = self.coor_X_export
Y_data = self.profiles_data_X
xlabel = r'$Q_x (nm^{-1})$'
title = "Linear regression of Qx fringes"
title_FFT = "Fast Fourier Transform of Qx profiles"
xlabel_FFT= "Period (nm)"
else:
draw_fringes = False
if draw_fringes and (event.button==1):
if len(self.profiles_fringes)>0:
self.profiles_fringes = np.asarray(self.profiles_fringes)
self.profiles_fringes = np.sort(self.profiles_fringes)
fringes_popup = PopUpFringes(self.profiles_fringes, xlabel, "Fringes order", title)
self.profiles_fringes=[]
self.clear_notes()
elif draw_fringes and (event.button == 3):
vline=ax.axvline(event.xdata, linewidth=2, color="green")
self.lines.append(vline)
self.profiles_fringes.append(event.xdata)
elif draw_fringes and event.button == 2:
XF,YF = Fourier(X_data, Y_data)
popup_window=PopUpImage(XF, YF, xlabel_FFT, "Normalized intensity", title_FFT)
self.profiles_canvas.draw()
#plt.clf()
def clear_notes(self):
"""
print "Number of notes: ",len(self.my_notes)
print "Number of lines: ",len(self.lines)
print "Number of points: ",len(self.points)
print "Number of polygons: ",len(self.polygons)
"""
if len(self.my_notes)>0:
for txt in self.my_notes:
txt.remove()
if len(self.lines)>0:
for line in self.lines:
line.remove()
if len(self.points)>0:
for p in self.points:
p.remove()
if len(self.polygons)>0:
for p in self.polygons:
p.remove()
self.canvas.draw()
self.my_notes = []
#self.profiles_notes = []
self.lines=[]
self.points=[]
self.polygons=[]
self.arb_lines_X=[]
self.arb_lines_Y=[]
self.arb_line_points = 0
def on_motion(self,event):
print "Mouse moved !"
if event.inaxes == self.ax and self.arbitrary_profiles_btn.get_active() and self.arb_line_points==1:
x = event.xdata
y = event.ydata
self.clear_notes()
line = self.ax.plot([self.arb_lines_X[0], x], [self.arb_lines_Y[0],y], 'ro-')
self.lines.append(line[0]) # ax.plot returns a list of Line2D; store the artist itself so clear_notes() can remove() it
self.canvas.draw()
def on_release(self, event):
if event.inaxes == self.ax:
if self.mouse_moved==True:
self.mouse_moved = False
def popup_info(self,info_type,text):
""" info_type = WARNING, INFO, QUESTION, ERROR """
if info_type.upper() == "WARNING":
mess_type = gtk.MESSAGE_WARNING
elif info_type.upper() == "INFO":
mess_type = gtk.MESSAGE_INFO
elif info_type.upper() == "ERROR":
mess_type = gtk.MESSAGE_ERROR
elif info_type.upper() == "QUESTION":
mess_type = gtk.MESSAGE_QUESTION
self.warning=gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, mess_type, gtk.BUTTONS_CLOSE,text)
self.warning.run()
self.warning.destroy()
#********************************************************************
# Functions for the Spec-HDF5 data conversion
#********************************************************************
def select_file(self,widget,path,label):
dialog = gtk.FileChooserDialog("Select file",None,gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response = dialog.run()
if response == gtk.RESPONSE_OK:
file_choosen = dialog.get_filename()
path.set_text(file_choosen)
self.DATA_current_folder = os.path.dirname(file_choosen)
if label == "A":
self.attenuation_file = file_choosen.decode('utf8')
elif label == "S":
self.spec_file = file_choosen.decode('utf8')
elif label == "M":
self.mca_file = file_choosen.decode('utf8')
else:
pass
dialog.destroy()
def select_folder(self, widget, path, label):
dialog = gtk.FileChooserDialog(title="Select folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
path.set_text(folder)
self.DATA_current_folder = folder.decode('utf8')
if label == "D":
self.des_folder = folder.decode('utf8')
else:
pass
dialog.destroy()
def HKL2Q(self,H,K,L,a):
""" Q// est dans la direction [110], Qz // [001]"""
Qx = H*np.sqrt(2.)/a
Qy = K*np.sqrt(2.)/a
Qz = L/a
return [Qx, Qy, Qz]
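# Worked example (illustrative only, assuming a is given in nm): for a Si substrate,
# a ~ 0.5431 nm, the (2 2 4) reflection gives Qx = Qy = 2*sqrt(2)/0.5431 ~ 5.21 nm^-1
# and Qz = 4/0.5431 ~ 7.37 nm^-1.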
def loadAmap(self,scanid,specfile,mapData,retard):
try:
psdSize = float(self.t1_entry.get_text())
Nchannels = int(self.t2_entry.get_text())
psdMin = int(self.t5_entry1.get_text())
psdMax = int(self.t5_entry2.get_text())
psd0 = float(self.t3_entry.get_text())
pixelSize = psdSize/Nchannels
pixelPerDeg = float(self.t4_entry.get_text())
distance = pixelSize * pixelPerDeg / np.tan(np.radians(1.0)) # sample-detector distance in mm
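# Worked example (illustrative values, not GUI defaults): with psdSize = 50 mm,
# Nchannels = 1024 (pixelSize ~ 0.0488 mm) and pixelPerDeg = 300 px/deg, the
# sample-detector distance is 0.0488 * 300 / tan(1 deg) ~ 839 mm.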
psdor = self.t6_entry.get_active() #psd orientation (up, down, in, out)
if psdor == 0:
psdor = 'z+'
elif psdor == 1:
psdor = 'z-'
else:
psdor = 'unknown'
energy = float(self.e2_entry.get_text())
filter_data = self.attenuation_file
monitor_col = self.e5_entry.get_text()
foil_col = self.e4_entry.get_text()
monitor_ref = float(self.e6_entry.get_text())
#****************** Calculation ************************
headers, scan_kappa = SP.ReadSpec(specfile,scanid)
Eta = scan_kappa['Eta']
print Eta.shape
tth = headers['P'][0]
omega = headers['P'][1]
tth = float(tth)
omega = float(omega)
print "Del: %.2f, Eta: %.2f"%(tth,omega)
#Si = xu.materials.Si
hxrd = xu.HXRD(self.substrate.Q(self.in_plane), self.substrate.Q(self.out_of_plane), en = energy)
hxrd.Ang2Q.init_linear(psdor,psd0, Nchannels, distance=distance, pixelwidth=pixelSize, chpdeg=pixelPerDeg)
HKL = hxrd.Ang2HKL(omega, tth)
HKL = np.asarray(HKL)
HKL = HKL.astype(int)
print "HKL = ",HKL
H=K=L=np.zeros(shape=(0,Nchannels))
for i in range(len(Eta)):
om=Eta[i]
q=hxrd.Ang2HKL(om,tth,mat=self.substrate,dettype='linear')
H = np.vstack((H,q[0]))
K = np.vstack((K,q[1]))
L = np.vstack((L,q[2]))
filtre_foil = scan_kappa[foil_col]
filtre = filtre_foil.copy()
monitor= scan_kappa[monitor_col]
foil_data = np.loadtxt(filter_data)
for f in xrange(foil_data.shape[0]):
coef = filtre_foil == f
filtre[coef] = foil_data[f,1]
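# Note (editorial comment, not in the original code): after this loop `filtre` holds, for
# each point of the scan, the attenuation factor taken from column 1 of the attenuation
# file, selected by the foil index recorded in the spec column `foil_col`.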
#print filtre
mapData = mapData + 1e-6
if retard:
for i in range(len(filtre)-1):
mapData[i+1] = mapData[i+1]*filtre[i]
else:
for i in range(len(filtre)):
mapData[i] = mapData[i]*filtre[i]
for i in range(len(monitor)):
mapData[i] = mapData[i]*monitor_ref/monitor[i]
mapData = mapData[:,psdMin:psdMax]
H = H[:,psdMin:psdMax]
K = K[:,psdMin:psdMax]
L = L[:,psdMin:psdMax]
########## Offset correction ###############
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = HKL[0] - H_sub
K_offset = HKL[1] - K_sub
L_offset = HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
Q = self.HKL2Q(H, K, L, a)
return Q,mapData
except:
self.popup_info("warning", "Please make sure that you have correctly entered the all parameters.")
return None,None
def gtk_waiting(self):
while gtk.events_pending():
gtk.main_iteration()
def Change_Lab_Instrument(self, widget):
self.choosen_instrument = self.Instrument.get_active_text()
print "I choose ",self.choosen_instrument
if self.choosen_instrument == "Bruker":
self.XRDML_xrdml_file_txt.set_text("Select RAW file: ")
self.XRDML_xrdml_file_browse.set_label("Browse RAW file")
elif self.choosen_instrument == "PANalytical":
self.XRDML_xrdml_file_txt.set_text("Select XRDML file: ")
self.XRDML_xrdml_file_browse.set_label("Browse XRDML file")
def Convert_Lab_Source(self, widget):
print "Instrument chosen: ",self.choosen_instrument
if self.choosen_instrument == "Bruker":
self.Bruker2HDF()
elif self.choosen_instrument == "PANalytical":
self.XRDML2HDF()
def XRDML2HDF(self):
try:
xrdml_file = self.spec_file
energy = self.XRDML_energy.get_text()
HKL = self.XRDML_reflection.get_text()
if HKL == "":
self.offset_correction = False
else:
self.offset_correction = True
HKL = HKL.split()
HKL = np.asarray([int(i) for i in HKL])
substrate = self.XRDML_substrate.get_active_text()
if substrate == "-- other":
substrate = self.XRDML_substrate_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
in_plane = self.XRDML_substrate_inplane.get_text()
out_of_plane = self.XRDML_substrate_outplane.get_text()
in_plane = in_plane.split()
self.in_plane = np.asarray([int(i) for i in in_plane])
out_of_plane = out_of_plane.split()
self.out_of_plane = np.asarray([int(i) for i in out_of_plane])
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading XRDML data ...")
self.gtk_waiting()
dataFile = xu.io.XRDMLFile(xrdml_file)
scan = dataFile.scan
#omega_exp = scan['Omega']
#tth_exp = scan['2Theta']
data = scan['detector']
omega,tth,psd = xu.io.getxrdml_map(xrdml_file)
if energy == "":
exp = xu.HXRD(self.substrate.Q(self.in_plane),self.substrate.Q(self.out_of_plane))
else:
energy = float(energy)
exp = xu.HXRD(self.substrate.Q(self.in_plane),self.substrate.Q(self.out_of_plane), en=energy)
[qx,qy,qz] = exp.Ang2Q(omega, tth)
mapData = psd.reshape(data.shape)
H = qy.reshape(data.shape)
K = qy.reshape(data.shape)
L = qz.reshape(data.shape)
########## Offset correction ###############
#if self.offset_correction:
#x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
#omalign = omega_exp[x,y]
#ttalign = tth_exp[x,y]
#[omnominal, dummy, dummy, ttnominal] = exp.Q2Ang(self.substrate.Q(HKL))
#omalign, ttalign, p, cov = xu.analysis.fit_bragg_peak(omega, tth, psd, omalign, ttalign, exp, plot=False)
#[qx, qy, qz] = exp.Ang2Q(omega, tth, delta=[omalign - omnominal,ttalign - ttnominal])
if self.offset_correction:
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = HKL[0] - H_sub
K_offset = HKL[1] - K_sub
L_offset = HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
Q = self.HKL2Q(H, K, L, a)
self.XRDML_show_info.set_text("XRDML data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "XRDML_Map"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
del_file = "rm -f %s"%h5file
os.system(del_file)
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=mapData, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Bruker2HDF(self):
try:
raw_file = self.spec_file
from MCA_GUI.Bruker import convert_raw_to_uxd,get_Bruker
uxd_file = raw_file.split(".")[0]+".uxd"
convert_raw_to_uxd(raw_file, uxd_file)
energy = self.XRDML_energy.get_text()
if energy == "":
energy = 8048
else:
energy = float(energy)
HKL = self.XRDML_reflection.get_text()
if HKL == "":
self.offset_correction = False
else:
self.offset_correction = True
HKL = HKL.split()
HKL = np.asarray([int(i) for i in HKL])
substrate = self.XRDML_substrate.get_active_text()
if substrate == "-- other":
substrate = self.XRDML_substrate_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading Raw data ...")
self.gtk_waiting()
acos = np.arccos
asin = np.arcsin
sqrt = np.sqrt
pi = np.pi
lam = xu.lam2en(energy)/10 #nm
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
dataset = get_Bruker(uxd_file)
theta = dataset['omega']
dTheta = dataset['tth']
########## Offset correction ###############
#---- Compute theoretical omega and 2Theta --> used for the offset correction
if self.offset_correction:
H=HKL[0]
K=HKL[1]
L=HKL[2]
tilt = acos(L / sqrt(H**2 + K**2 + L**2))*180.0/pi
teta = asin(lam * sqrt(H**2 + K**2 + L**2) / (2.0 * a))*180.0/pi
dTheta_theorique = 2.0 * teta
omega_theorique = teta + tilt
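# Note (editorial comment, not in the original code): for a cubic lattice
# d(HKL) = a / sqrt(H^2 + K^2 + L^2), so Bragg's law lam = 2*d*sin(teta) gives the
# theoretical Bragg angle `teta`; `tilt` is the angle between [H K L] and the surface
# normal [0 0 1], and omega = teta + tilt for this asymmetric geometry.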
x,y=np.unravel_index(np.argmax(dataset['data']),dataset['data'].shape)
dT_sub = dataset['tth'][x,y]
om_sub = dataset['omega'][x,y]
dT_offset=dTheta_theorique - dT_sub
om_offset=omega_theorique - om_sub
#--- Correction:
dTheta += dT_offset
theta += om_offset
psi = np.zeros(shape=theta.shape)
Qmod = np.zeros(shape=theta.shape)
Qx = np.zeros(shape=theta.shape)
Qz = np.zeros(shape=theta.shape)
psi = theta - dTheta/2.0
#print psi
Qmod = 2.0/lam * np.sin(np.radians(dTheta/2.0))
Qx = Qmod * np.sin(np.radians(psi))
Qz = Qmod * np.cos(np.radians(psi))
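# Note (editorial comment, not in the original code): |Q| = (2/lam) * sin(dTheta/2) and
# psi = theta - dTheta/2 is the inclination of Q from the surface normal, hence
# Qx = |Q|*sin(psi) and Qz = |Q|*cos(psi); with `lam` in nm these are in nm^-1.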
self.XRDML_show_info.set_text("Raw data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "RSM"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
os.remove(h5file)
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=dataset['data'], compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Qx, compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Qx, compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Qz, compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def spec2HDF(self,widget):
try:
specfile = self.spec_file
mcafile = self.mca_file
scan_beg = int(self.c4_entry1.get_text())
scan_end = int(self.c4_entry2.get_text())
substrate = self.e1_entry.get_active_text()
if substrate == "-- other":
substrate = self.e1_entry_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
scanid = range(scan_beg, scan_end+1)
self.show_info.set_text("Reading MCA data ...")
self.gtk_waiting()
allMaps = SP.ReadMCA2D_complete(mcafile)
description = self.c5_entry1.get_text()
retard = self.c6_entry.get_active()
total = len(allMaps)
total_maps_loaded = "Number of map(s) loaded: %d"%total
self.show_info.set_text(total_maps_loaded)
self.gtk_waiting()
if description == "":
no_description = True
else:
description = description.split(",")
no_description = False
for i in range(len(allMaps)):
scannumber = scanid[i]
scan_name = "Scan_%d"%scannumber
if no_description:
h5file = scan_name+".h5"
d = scan_name
else:
h5file = description[i].strip()+".h5"
d = description[i].strip()
info = "\nSaving file N# %d/%d: %s"%(i+1,total,h5file)
out_info = total_maps_loaded + info
self.show_info.set_text(out_info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
os.remove(h5file)
h5file = h5.File(h5file,"w")
Q,mapdata = self.loadAmap(scannumber, specfile, allMaps[i], retard)
s = h5file.create_group(scan_name)
s.create_dataset('intensity', data=mapdata, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=d)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Export_HQ_Image(self, widget):
dialog = gtk.FileChooserDialog(title="Save image", action=gtk.FILE_CHOOSER_ACTION_SAVE, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
filename = self.rsm_choosen.split(".")[0] if self.rsm_choosen != "" else "Img"
dialog.set_current_name(filename+".png")
#dialog.set_filename(filename)
dialog.set_current_folder(self.GUI_current_folder)
filtre = gtk.FileFilter()
filtre.set_name("images")
filtre.add_pattern("*.png")
filtre.add_pattern("*.jpg")
filtre.add_pattern("*.pdf")
filtre.add_pattern("*.ps")
filtre.add_pattern("*.eps")
dialog.add_filter(filtre)
filtre = gtk.FileFilter()
filtre.set_name("Other")
filtre.add_pattern("*")
dialog.add_filter(filtre)
response = dialog.run()
if response==gtk.RESPONSE_OK:
#self.fig.savefig(dialog.get_filename())
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
fig = plt.figure(figsize=(10,8),dpi=100)
ax = fig.add_axes([0.1,0.2,0.7,0.7])
cax = fig.add_axes([0.85,0.2,0.03,0.7])
clabel = r'$Intensity\ (Counts\ per\ second)$'
fmt = "%d"
if self.linear_scale_btn.get_active():
clabel = r'$Log_{10}\ (Intensity)\ [arb.\ units]$'
fmt = "%.2f"
data = self.gridder.data.T
data = flat_data(data, self.vmin, self.vmax, self.linear_scale_btn.get_active())
img = ax.contourf(self.gridder.xaxis, self.gridder.yaxis, data, 100, vmin=self.vmin*1.05, vmax=self.vmax)
cb = fig.colorbar(img,cax=cax, format=fmt)
cb.set_label(clabel, fontsize=20)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.yaxis.label.set_size(20)
ax.xaxis.label.set_size(20)
ax.set_title(self.rsm_description,fontsize=20)
fig.savefig(dialog.get_filename())
plt.close()
dialog.destroy()
if __name__=="__main__":
MyMainWindow()
gtk.main()
| gpl-2.0 |
williampma/opencog | opencog/python/spatiotemporal/temporal_events/animation.py | 34 | 4896 |
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
x_axis = xrange(13)
zeros_13 = zeros(13)
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
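# Note (editorial comment, inferred from the constructor calls at the bottom of this file):
# TemporalEventTrapezium(a, b, beginning, ending) appears to define a trapezoidal
# membership function with support [a, b] and plateau [beginning, ending]; the offsets
# stored above are reused in animate() to rebuild event B at each start time interval[t].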
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
def show(self):
fr = len(self.interval) - 1
anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
frames=fr, interval=fr, blit=True)
self.plt.show()
if __name__ == '__main__':
anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
TemporalEventTrapezium(0, 10, 6, 9),
TemporalEventTrapezium(0.5, 11, 1, 3))
# anim.show()
| agpl-3.0 |
DarkEnergyScienceCollaboration/WeakLensingDeblending | fisher.py | 2 | 10873 |
#!/usr/bin/env python
"""Create plots to illustrate galaxy parameter error estimation using Fisher matrices.
"""
from __future__ import print_function, division
import argparse
import numpy as np
import matplotlib.pyplot as plt
import astropy.table
import descwl
def main():
# Initialize and parse command-line arguments.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action = 'store_true',
help = 'Provide verbose output.')
descwl.output.Reader.add_args(parser)
parser.add_argument('--no-display', action = 'store_true',
help = 'Do not display the image on screen.')
parser.add_argument('-o','--output-name',type = str, default = None, metavar = 'FILE',
help = 'Name of the output file to write.')
parser.add_argument('--galaxy', type = int,
default = None, metavar = 'ID',
help = 'Use the galaxy with this database ID, ignoring any overlaps.')
parser.add_argument('--group', type = int,
default = None, metavar = 'ID',
help = 'Use the overlapping group of galaxies with this group ID.')
parser.add_argument('--partials', action = 'store_true',
help = 'Show partial derivative images (instead of Fisher matrix images).')
parser.add_argument('--matrix', action = 'store_true',
help = 'Show summed Fisher matrix elements (instead of Fisher matrix images).')
parser.add_argument('--covariance', action = 'store_true',
help = 'Show covariance matrix elements (instead of Fisher matrix images).')
parser.add_argument('--correlation', action = 'store_true',
help = 'Show correlation matrix elements (instead of Fisher matrix images).')
display_group = parser.add_argument_group('Display options')
display_group.add_argument('--figure-size', type = float,
default = 12., metavar = 'SIZE',
help = 'Size of the longest edge of the figure in inches.')
display_group.add_argument('--colormap', type = str,
default = 'RdBu', metavar = 'CMAP',
help = 'Matplotlib colormap name to use.')
display_group.add_argument('--no-labels', action = 'store_true',
help = 'Do not display any text labels.')
display_group.add_argument('--label-color', type = str,
default = 'greenyellow', metavar = 'COL',
help = 'Matplotlib color name to use for label text.')
display_group.add_argument('--label-size', type = str,
default = 'medium', metavar = 'SIZE',
help = 'Matplotlib font size specification in points or relative (small,large,...)')
display_group.add_argument('--value-format', type = str,
default = '%.3g', metavar = 'FMT',
help = 'Printf format to use for matrix element values.')
display_group.add_argument('--clip-percentile', type = float,
default = 10.0, metavar = 'PCT',
help = 'Percentile level for clipping color scale.')
args = parser.parse_args()
if args.no_display and not args.output_name:
print('No display or output requested.')
return 0
if args.galaxy is None and args.group is None:
print('Must specify either a galaxy or a group.')
return -1
if args.galaxy is not None and args.group is not None:
print('Cannot specify both a galaxy and a group.')
return -1
if args.partials + args.matrix + args.covariance + args.correlation > 1:
print('Can only specify one of the partials, matrix, covariance, correlation options.')
return -1
if args.clip_percentile < 0 or args.clip_percentile >= 50:
print('Invalid --clip-percentile %f (should be 0-50).' % args.clip_percentile)
return -1
# Load the analysis results file we will get partial derivative images from.
try:
reader = descwl.output.Reader.from_args(defer_stamp_loading = True,args = args)
results = reader.results
npartials = len(results.slice_labels)
if args.verbose:
print(results.survey.description())
except RuntimeError as e:
print(str(e))
return -1
if results.table is None:
print('Input file is missing a results catalog.')
return -1
if results.stamps is None:
print('Input file is missing stamp datacubes.')
return -1
# Look for the selected galaxy or group.
if args.galaxy:
selected = results.select('db_id==%d' % args.galaxy)
if len(selected) == 0:
print('No such galaxy with ID %d' % args.galaxy)
return -1
title = 'galaxy-%d' % args.galaxy
else:
selected = results.select('grp_id==%d' % args.group)
if len(selected) == 0:
print('No such group with ID %d' % args.group)
return -1
title = 'group-%d' % args.group
# Sort selected galaxies in increasing rank order.
sort_order = np.argsort(results.table['grp_rank'][selected])
selected = selected[sort_order]
num_selected = len(selected)
npar = npartials*num_selected
nrows,ncols = npar,npar
# Get the background image for these galaxies.
background = results.get_subimage(selected)
height,width = background.array.shape
# Calculate matrix elements.
fisher,covariance,variance,correlation = results.get_matrices(selected)
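# Note (editorial comment; the exact conventions are those of descwl.output): the
# covariance is normally the inverse of the Fisher matrix, `variance` its diagonal, and
# correlation[i, j] = covariance[i, j] / sqrt(variance[i] * variance[j]).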
show_matrix = args.matrix or args.covariance or args.correlation
if show_matrix:
if args.matrix:
matrix = fisher
elif args.covariance:
matrix = covariance
else:
matrix = correlation
# Print a summary table of RMS errors on each parameter.
if args.verbose and correlation is not None:
dtypes = [ (name,np.float32) for name in results.slice_labels ]
dtypes.insert(0,('id',np.int64))
summary = np.empty(shape = (len(selected),),dtype = dtypes)
summary['id'] = results.table['db_id'][selected]
for index in range(ncols):
galaxy = index//npartials
islice = index%npartials
summary[galaxy][islice+1] = np.sqrt(variance[index])
print(astropy.table.Table(summary))
# Calculate the bounds for our figure.
if args.partials:
nrows = 1
figure_scale = args.figure_size/(ncols*max(height,width))
figure_width = ncols*width*figure_scale
figure_height = nrows*height*figure_scale
figure = plt.figure(figsize = (figure_width,figure_height),frameon=False)
figure.canvas.set_window_title(title)
plt.subplots_adjust(left = 0,bottom = 0,right = 1,top = 1,wspace = 0,hspace = 0)
def draw(row,col,pixels):
axes = plt.subplot(nrows,ncols,row*ncols+col+1)
axes.set_axis_off()
if row == col:
# All values are positive.
vmax = np.percentile(pixels[pixels != 0], 100 - args.clip_percentile)
else:
vmax = np.max(np.fabs(np.percentile(pixels[pixels != 0],
(args.clip_percentile, 100 - args.clip_percentile))))
vmin = -vmax
scaled = np.clip(pixels,vmin,vmax)
plt.imshow(scaled,origin = 'lower',interpolation = 'nearest',
cmap = args.colormap,vmin = vmin,vmax = vmax)
def draw_param_label(index,row,col):
# index determines which parameter label to draw.
# row,col determine where the label will be drawn.
islice = index%npartials
igalaxy = index//npartials
rank = results.table['grp_rank'][selected[igalaxy]]
# Latex labels do not get the correct vertical alignment.
##param_label = '$%s_{%d}$' % (results.slice_labels[islice],rank)
param_label = '%s_%d' % (results.slice_labels[islice],rank)
x = (col+1.)/ncols
y = 1.-float(row)/nrows
plt.annotate(param_label,xy = (x,y),xycoords = 'figure fraction',
color = args.label_color, fontsize = args.label_size,
horizontalalignment = 'right',verticalalignment = 'top')
if args.partials:
# Draw the partial-derivative images on a single row.
stamp = results.get_subimage(selected)
for col in range(ncols):
galaxy = selected[col//npartials]
islice = col%npartials
stamp.array[:] = 0.
stamp[results.bounds[galaxy]] = results.get_stamp(galaxy,islice)
if islice == 0:
# Normalize to give partial with respect to added flux in electrons.
stamp /= results.table['flux'][galaxy]
draw(0,col,stamp.array)
if not args.no_labels:
draw_param_label(index=col,row=0,col=col)
elif show_matrix:
# Draw the values of the matrix we calculated above.
span = np.arange(nrows)
row,col = np.meshgrid(span,span)
lower_triangle = np.ma.masked_where(row > col,matrix)
axes = plt.subplot(1,1,1)
axes.set_axis_off()
vmin,vmax = (-1.,+1.) if args.correlation else (None,None)
plt.imshow(lower_triangle,interpolation = 'nearest',aspect = 'auto',
cmap = args.colormap,vmin = vmin,vmax = vmax)
if not args.no_labels:
for row in range(nrows):
for col in range(row+1):
value_label = args.value_format % matrix[row,col]
xc = (col+0.5)/ncols
yc = 1.-(row+0.5)/nrows
plt.annotate(value_label,xy = (xc,yc),xycoords = 'figure fraction',
color = args.label_color, fontsize = args.label_size,
horizontalalignment = 'center',verticalalignment = 'center')
if row == col and not args.no_labels:
draw_param_label(index=row,row=row,col=col)
else:
# Draw Fisher-matrix images.
stamp = background.copy()
for row,index1 in enumerate(selected):
for col,index2 in enumerate(selected[:row+1]):
images,overlap = results.get_fisher_images(index1,index2,background)
if overlap is None:
continue
for par1 in range(npartials):
fisher_row = npartials*row+par1
for par2 in range(npartials):
fisher_col = npartials*col+par2
if fisher_col > fisher_row:
continue
stamp.array[:] = 0.
stamp[overlap].array[:] = images[par1,par2]
draw(fisher_row,fisher_col,stamp.array)
if fisher_row == fisher_col and not args.no_labels:
draw_param_label(index = fisher_row,row = fisher_row,col = fisher_col)
if args.output_name:
figure.savefig(args.output_name)
if not args.no_display:
plt.show()
if __name__ == '__main__':
main()
| mit |
mne-tools/mne-python | mne/viz/epochs.py | 1 | 43884 |
"""Functions to plot epochs data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
# Jona Sassenhagen <[email protected]>
# Stefan Repplinger <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from collections import Counter
from copy import deepcopy
import warnings
import numpy as np
from .raw import _setup_channel_selections
from ..defaults import _handle_default
from ..utils import verbose, logger, warn, fill_doc, _check_option
from ..io.meas_info import create_info, _validate_type
from ..io.pick import (_get_channel_types, _picks_to_idx, _DATA_CH_TYPES_SPLIT,
_VALID_CHANNEL_TYPES)
from .utils import (tight_layout, _setup_vmin_vmax, plt_show, _check_cov,
_compute_scalings, DraggableColorbar, _setup_cmap,
_handle_decim, _set_title_multiple_electrodes,
_make_combine_callable, _set_window_title,
_make_event_color_dict, _get_channel_plotting_order)
@fill_doc
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap=None, fig=None,
axes=None, overlay_times=None, combine=None,
group_by=None, evoked=True, ts_args=None, title=None,
clear=False):
"""Plot Event Related Potential / Fields image.
Parameters
----------
epochs : instance of Epochs
The epochs.
%(picks_good_data)s
``picks`` interacts with ``group_by`` and ``combine`` to determine the
number of figures generated; see Notes.
sigma : float
The standard deviation of a Gaussian smoothing window applied along
the epochs axis of the image. If 0, no smoothing is applied.
Defaults to 0.
vmin : None | float | callable
The min value in the image (and the ER[P/F]). The unit is µV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
If vmin is None and multiple plots are returned, the limit is
equalized within channel types.
Hint: to specify the lower limit of the data, use
``vmin=lambda data: data.min()``.
vmax : None | float | callable
The max value in the image (and the ER[P/F]). The unit is µV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
        If vmax is None and multiple plots are returned, the limit is
equalized within channel types.
colorbar : bool
        Whether to display a colorbar.
order : None | array of int | callable
If not ``None``, order is used to reorder the epochs along the y-axis
of the image. If it is an array of :class:`int`, its length should
match the number of good epochs. If it is a callable it should accept
two positional parameters (``times`` and ``data``, where
``data.shape == (len(good_epochs), len(times))``) and return an
:class:`array <numpy.ndarray>` of indices that will sort ``data`` along
its first axis.
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``units=dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)``.
cmap : None | colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True).
If None, "RdBu_r" is used, unless the data is all positive, in which
case "Reds" is used.
fig : Figure | None
:class:`~matplotlib.figure.Figure` instance to draw the image to.
Figure must contain the correct number of axes for drawing the epochs
image, the evoked response, and a colorbar (depending on values of
``evoked`` and ``colorbar``). If ``None`` a new figure is created.
Defaults to ``None``.
axes : list of Axes | dict of list of Axes | None
List of :class:`~matplotlib.axes.Axes` objects in which to draw the
image, evoked response, and colorbar (in that order). Length of list
must be 1, 2, or 3 (depending on values of ``colorbar`` and ``evoked``
parameters). If a :class:`dict`, each entry must be a list of Axes
objects with the same constraints as above. If both ``axes`` and
``group_by`` are dicts, their keys must match. Providing non-``None``
values for both ``fig`` and ``axes`` results in an error. Defaults to
``None``.
overlay_times : array_like, shape (n_epochs,) | None
Times (in seconds) at which to draw a line on the corresponding row of
the image (e.g., a reaction time associated with each epoch). Note that
``overlay_times`` should be ordered to correspond with the
:class:`~mne.Epochs` object (i.e., ``overlay_times[0]`` corresponds to
``epochs[0]``, etc).
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_epochs, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_epochs, n_times)``. For
example::
combine = lambda data: np.median(data, axis=1)
If ``combine`` is ``None``, channels are combined by computing GFP,
unless ``group_by`` is also ``None`` and ``picks`` is a list of
specific channels (not channel types), in which case no combining is
performed and each channel gets its own figure. See Notes for further
details. Defaults to ``None``.
group_by : None | dict
Specifies which channels are aggregated into a single figure, with
aggregation method determined by the ``combine`` parameter. If not
``None``, one :class:`~matplotlib.figure.Figure` is made per dict
entry; the dict key will be used as the figure title and the dict
values must be lists of picks (either channel names or integer indices
of ``epochs.ch_names``). For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
Note that within a dict entry all channels must have the same type.
``group_by`` interacts with ``picks`` and ``combine`` to determine the
number of figures generated; see Notes. Defaults to ``None``.
evoked : bool
        Whether to draw the ER[P/F] below the image.
ts_args : None | dict
Arguments passed to a call to `~mne.viz.plot_compare_evokeds` to style
the evoked plot below the image. Defaults to an empty dictionary,
meaning `~mne.viz.plot_compare_evokeds` will be called with default
parameters.
title : None | str
If :class:`str`, will be plotted as figure title. Otherwise, the
title will indicate channel(s) or channel type being plotted. Defaults
to ``None``.
clear : bool
Whether to clear the axes before plotting (if ``fig`` or ``axes`` are
provided). Defaults to ``False``.
Returns
-------
figs : list of Figure
One figure per channel, channel type, or group, depending on values of
``picks``, ``group_by``, and ``combine``. See Notes.
Notes
-----
You can control how channels are aggregated into one figure or plotted in
separate figures through a combination of the ``picks``, ``group_by``, and
``combine`` parameters. If ``group_by`` is a :class:`dict`, the result is
one :class:`~matplotlib.figure.Figure` per dictionary key (for any valid
values of ``picks`` and ``combine``). If ``group_by`` is ``None``, the
number and content of the figures generated depends on the values of
``picks`` and ``combine``, as summarized in this table:
.. cssclass:: table-bordered
.. rst-class:: midvalign
+----------+----------------------------+------------+-------------------+
| group_by | picks | combine | result |
+==========+============================+============+===================+
| | None, int, list of int, | None, | |
| dict | ch_name, list of ch_names, | string, or | 1 figure per |
| | ch_type, list of ch_types | callable | dict key |
+----------+----------------------------+------------+-------------------+
| | None, | None, | |
| | ch_type, | string, or | 1 figure per |
| | list of ch_types | callable | ch_type |
| None +----------------------------+------------+-------------------+
| | int, | None | 1 figure per pick |
| | ch_name, +------------+-------------------+
| | list of int, | string or | 1 figure |
| | list of ch_names | callable | |
+----------+----------------------------+------------+-------------------+
"""
from scipy.ndimage import gaussian_filter1d
from .. import EpochsArray
_validate_type(group_by, (dict, None), 'group_by')
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
if set(units) != set(scalings):
raise ValueError('Scalings and units must have the same keys.')
# is picks a channel type (or None)?
picks, picked_types = _picks_to_idx(epochs.info, picks, return_kind=True)
ch_types = _get_channel_types(epochs.info, picks)
# `combine` defaults to 'gfp' unless picks are specific channels and
# there was no group_by passed
combine_given = combine is not None
if combine is None and (group_by is not None or picked_types):
combine = 'gfp'
# convert `combine` into callable (if None or str)
combine_func = _make_combine_callable(combine)
# handle ts_args (params for the evoked time series)
ts_args = dict() if ts_args is None else ts_args
manual_ylims = 'ylim' in ts_args
if combine is not None:
ts_args['show_sensors'] = False
vlines = [0] if (epochs.times[0] < 0 < epochs.times[-1]) else []
ts_defaults = dict(colors={'cond': 'k'}, title='', show=False,
truncate_yaxis=False, truncate_xaxis=False,
vlines=vlines, legend=False)
ts_defaults.update(**ts_args)
ts_args = ts_defaults.copy()
# construct a group_by dict if one wasn't supplied
if group_by is None:
if picked_types:
# one fig per ch_type
group_by = {ch_type: picks[np.array(ch_types) == ch_type]
for ch_type in set(ch_types)
if ch_type in _DATA_CH_TYPES_SPLIT}
elif combine is None:
# one fig per pick
group_by = {epochs.ch_names[pick]: [pick] for pick in picks}
else:
# one fig to rule them all
ch_names = np.array(epochs.ch_names)[picks].tolist()
key = _set_title_multiple_electrodes(None, combine, ch_names)
group_by = {key: picks}
else:
group_by = deepcopy(group_by)
# check for heterogeneous sensor type combinations / "combining" 1 channel
for this_group, these_picks in group_by.items():
this_ch_type = np.array(ch_types)[np.in1d(picks, these_picks)]
if len(set(this_ch_type)) > 1:
types = ', '.join(set(this_ch_type))
raise ValueError('Cannot combine sensors of different types; "{}" '
'contains types {}.'.format(this_group, types))
# now we know they're all the same type...
group_by[this_group] = dict(picks=these_picks, ch_type=this_ch_type[0],
title=title)
# are they trying to combine a single channel?
if len(these_picks) < 2 and combine_given:
warn('Only one channel in group "{}"; cannot combine by method '
'"{}".'.format(this_group, combine))
# check for compatible `fig` / `axes`; instantiate figs if needed; add
# fig(s) and axes into group_by
group_by = _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar,
clear=clear)
# prepare images in advance to get consistent vmin/vmax.
# At the same time, create a subsetted epochs object for each group
data = epochs.get_data()
vmin_vmax = {ch_type: dict(images=list(), norm=list())
for ch_type in set(ch_types)}
for this_group, this_group_dict in group_by.items():
these_picks = this_group_dict['picks']
this_ch_type = this_group_dict['ch_type']
this_ch_info = [epochs.info['chs'][n] for n in these_picks]
these_ch_names = np.array(epochs.info['ch_names'])[these_picks]
this_data = data[:, these_picks]
# create subsetted epochs object
this_info = create_info(sfreq=epochs.info['sfreq'],
ch_names=list(these_ch_names),
ch_types=[this_ch_type] * len(these_picks))
this_info['chs'] = this_ch_info
this_epochs = EpochsArray(this_data, this_info, tmin=epochs.times[0])
# apply scalings (only to image, not epochs object), combine channels
this_image = combine_func(this_data * scalings[this_ch_type])
# handle `order`. NB: this can potentially yield different orderings
# in each figure!
this_image, _overlay_times = _order_epochs(this_image, epochs.times,
order, overlay_times)
this_norm = np.all(this_image > 0)
# apply smoothing
if sigma > 0.:
this_image = gaussian_filter1d(this_image, sigma=sigma, axis=0,
mode='nearest')
# update the group_by and vmin_vmax dicts
group_by[this_group].update(image=this_image, epochs=this_epochs,
norm=this_norm)
vmin_vmax[this_ch_type]['images'].append(this_image)
vmin_vmax[this_ch_type]['norm'].append(this_norm)
# compute overall vmin/vmax for images
for ch_type, this_vmin_vmax_dict in vmin_vmax.items():
image_list = this_vmin_vmax_dict['images']
image_stack = np.stack(image_list)
norm = all(this_vmin_vmax_dict['norm'])
vmin_vmax[ch_type] = _setup_vmin_vmax(image_stack, vmin, vmax, norm)
del image_stack, vmin, vmax
# prepare to plot
auto_ylims = {ch_type: [0., 0.] for ch_type in set(ch_types)}
# plot
for this_group, this_group_dict in group_by.items():
this_ch_type = this_group_dict['ch_type']
this_axes_dict = this_group_dict['axes']
vmin, vmax = vmin_vmax[this_ch_type]
# plot title
if this_group_dict['title'] is None:
title = _handle_default('titles').get(this_group, this_group)
if isinstance(combine, str) and len(title):
_comb = combine.upper() if combine == 'gfp' else combine
_comb = 'std. dev.' if _comb == 'std' else _comb
title += f' ({_comb})'
# plot the image
this_fig = _plot_epochs_image(
this_group_dict['image'], epochs=this_group_dict['epochs'],
picks=picks, colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
style_axes=True, norm=this_group_dict['norm'],
unit=units[this_ch_type], ax=this_axes_dict, show=False,
title=title, combine=combine, combine_given=combine_given,
overlay_times=_overlay_times, evoked=evoked, ts_args=ts_args)
group_by[this_group].update(fig=this_fig)
# detect ylims across figures
if evoked and not manual_ylims:
# ensure get_ylim works properly
this_axes_dict['evoked'].figure.canvas.draw_idle()
this_bot, this_top = this_axes_dict['evoked'].get_ylim()
this_min = min(this_bot, this_top)
this_max = max(this_bot, this_top)
            curr_min, curr_max = auto_ylims[this_ch_type]
auto_ylims[this_ch_type] = [min(curr_min, this_min),
max(curr_max, this_max)]
# equalize ylims across figures (does not adjust ticks)
if evoked:
for this_group_dict in group_by.values():
ax = this_group_dict['axes']['evoked']
ch_type = this_group_dict['ch_type']
if not manual_ylims:
args = auto_ylims[ch_type]
if 'invert_y' in ts_args:
args = args[::-1]
ax.set_ylim(*args)
plt_show(show)
# impose deterministic order of returned objects
return_order = np.array(sorted(group_by))
are_ch_types = np.in1d(return_order, _VALID_CHANNEL_TYPES)
if any(are_ch_types):
return_order = np.concatenate((return_order[are_ch_types],
return_order[~are_ch_types]))
return [group_by[group]['fig'] for group in return_order]
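# Illustrative usage sketch (``epochs`` is assumed to be an existing
# :class:`~mne.Epochs` instance); one figure per ``group_by`` key, with the
# channels of each ROI combined by their mean:
#
#   import mne
#   figs = mne.viz.plot_epochs_image(
#       epochs, sigma=1.,
#       group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8]),
#       combine='mean', show=False)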
def _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar, clear=False):
"""Check user-provided fig/axes compatibility with plot_epochs_image."""
from matplotlib.pyplot import figure, Axes, subplot2grid
n_axes = 1 + int(evoked) + int(colorbar)
ax_names = ('image', 'evoked', 'colorbar')
ax_names = np.array(ax_names)[np.where([True, evoked, colorbar])]
prefix = 'Since evoked={} and colorbar={}, '.format(evoked, colorbar)
# got both fig and axes
if fig is not None and axes is not None:
raise ValueError('At least one of "fig" or "axes" must be None; got '
'fig={}, axes={}.'.format(fig, axes))
# got fig=None and axes=None: make fig(s) and axes
if fig is None and axes is None:
axes = dict()
colspan = 9 if colorbar else 10
rowspan = 2 if evoked else 3
shape = (3, 10)
for this_group in group_by:
this_fig = figure()
_set_window_title(this_fig, this_group)
subplot2grid(shape, (0, 0), colspan=colspan, rowspan=rowspan,
fig=this_fig)
if evoked:
subplot2grid(shape, (2, 0), colspan=colspan, rowspan=1,
fig=this_fig)
if colorbar:
subplot2grid(shape, (0, 9), colspan=1, rowspan=rowspan,
fig=this_fig)
axes[this_group] = this_fig.axes
# got a Figure instance
if fig is not None:
# If we're re-plotting into a fig made by a previous call to
# `plot_image`, be forgiving of presence/absence of sensor inset axis.
if len(fig.axes) not in (n_axes, n_axes + 1):
raise ValueError('{}"fig" must contain {} axes, got {}.'
''.format(prefix, n_axes, len(fig.axes)))
if len(list(group_by)) != 1:
raise ValueError('When "fig" is not None, "group_by" can only '
'have one group (got {}: {}).'
.format(len(group_by), ', '.join(group_by)))
key = list(group_by)[0]
if clear: # necessary if re-plotting into previous figure
_ = [ax.clear() for ax in fig.axes]
if len(fig.axes) > n_axes: # get rid of sensor inset
fig.axes[-1].remove()
_set_window_title(fig, key)
axes = {key: fig.axes}
# got an Axes instance, be forgiving (if evoked and colorbar are False)
if isinstance(axes, Axes):
axes = [axes]
# got an ndarray; be forgiving
if isinstance(axes, np.ndarray):
axes = axes.ravel().tolist()
# got a list of axes, make it a dict
if isinstance(axes, list):
if len(axes) != n_axes:
raise ValueError('{}"axes" must be length {}, got {}.'
''.format(prefix, n_axes, len(axes)))
# for list of axes to work, must be only one group
if len(list(group_by)) != 1:
raise ValueError('When axes is a list, can only plot one group '
'(got {} groups: {}).'
.format(len(group_by), ', '.join(group_by)))
key = list(group_by)[0]
axes = {key: axes}
# got a dict of lists of axes, make it dict of dicts
if isinstance(axes, dict):
# in theory a user could pass a dict of axes but *NOT* pass a group_by
# dict, but that is forbidden in the docstring so it shouldn't happen.
# The next test could fail in that case because we've constructed a
# group_by dict and the user won't have known what keys we chose.
if set(axes) != set(group_by):
raise ValueError('If "axes" is a dict its keys ({}) must match '
'the keys in "group_by" ({}).'
.format(list(axes), list(group_by)))
for this_group, this_axes_list in axes.items():
if len(this_axes_list) != n_axes:
raise ValueError('{}each value in "axes" must be a list of {} '
'axes, got {}.'.format(prefix, n_axes,
len(this_axes_list)))
# NB: next line assumes all axes in each list are in same figure
group_by[this_group]['fig'] = this_axes_list[0].get_figure()
group_by[this_group]['axes'] = {key: axis for key, axis in
zip(ax_names, this_axes_list)}
return group_by
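# Illustrative sketch of passing pre-built axes (the 1x3 layout is an arbitrary
# choice by the caller; ``epochs`` is assumed to exist). With the default
# ``evoked=True`` and ``colorbar=True`` the list must contain exactly three
# Axes, in the order image, evoked, colorbar:
#
#   import matplotlib.pyplot as plt
#   fig, (ax_im, ax_ev, ax_cb) = plt.subplots(1, 3)
#   plot_epochs_image(epochs, picks='eeg', combine='gfp',
#                     axes=[ax_im, ax_ev, ax_cb], show=False)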
def _order_epochs(data, times, order=None, overlay_times=None):
"""Sort epochs image data (2D). Helper for plot_epochs_image."""
n_epochs = len(data)
if overlay_times is not None:
if len(overlay_times) != n_epochs:
raise ValueError(
f'size of overlay_times parameter ({len(overlay_times)}) does '
f'not match the number of epochs ({n_epochs}).')
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if (times_min < times[0]) or (times_max > times[-1]):
warn('Some values in overlay_times fall outside of the epochs '
f'time interval (between {times[0]} s and {times[-1]} s)')
if callable(order):
order = order(times, data)
if order is not None:
if len(order) != n_epochs:
raise ValueError(f'If order is a {type(order).__name__}, its '
f'length ({len(order)}) must match the length of '
f'the data ({n_epochs}).')
order = np.array(order)
data = data[order]
if overlay_times is not None:
overlay_times = overlay_times[order]
return data, overlay_times
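# Illustrative sketch of a callable ``order`` as accepted above: it receives
# ``times`` and ``data`` (shape ``(n_good_epochs, n_times)``) and returns sort
# indices, here ordering epochs by the latency of their absolute peak
# (``epochs`` is assumed to exist):
#
#   import numpy as np
#   def order_by_peak_latency(times, data):
#       return np.argsort(np.abs(data).argmax(axis=1))
#
#   plot_epochs_image(epochs, picks='mag', order=order_by_peak_latency)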
def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None,
vmin=None, vmax=None, colorbar=False, show=False,
unit=None, cmap=None, ax=None, overlay_times=None,
title=None, evoked=False, ts_args=None, combine=None,
combine_given=False, norm=False):
"""Plot epochs image. Helper function for plot_epochs_image."""
from matplotlib.ticker import AutoLocator
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
tmin = epochs.times[0]
tmax = epochs.times[-1]
ax_im = ax['image']
fig = ax_im.get_figure()
# draw the image
cmap = _setup_cmap(cmap, norm=norm)
n_epochs = len(image)
extent = [tmin, tmax, 0, n_epochs]
im = ax_im.imshow(image, vmin=vmin, vmax=vmax, cmap=cmap[0], aspect='auto',
origin='lower', interpolation='nearest', extent=extent)
# optional things
if style_axes:
ax_im.set_title(title)
ax_im.set_ylabel('Epochs')
if not evoked:
ax_im.set_xlabel('Time (s)')
ax_im.axis('auto')
ax_im.axis('tight')
ax_im.axvline(0, color='k', linewidth=1, linestyle='--')
if overlay_times is not None:
ax_im.plot(overlay_times, 0.5 + np.arange(n_epochs), 'k',
linewidth=2)
ax_im.set_xlim(tmin, tmax)
# draw the evoked
if evoked:
from . import plot_compare_evokeds
pass_combine = (combine if combine_given else None)
_picks = [0] if len(picks) == 1 else None # prevent applying GFP
plot_compare_evokeds({'cond': list(epochs.iter_evoked(copy=False))},
picks=_picks, axes=ax['evoked'],
combine=pass_combine, **ts_args)
ax['evoked'].set_xlim(tmin, tmax)
ax['evoked'].lines[0].set_clip_on(True)
ax['evoked'].collections[0].set_clip_on(True)
ax['evoked'].get_shared_x_axes().join(ax['evoked'], ax_im)
# fix the axes for proper updating during interactivity
loc = ax_im.xaxis.get_major_locator()
ax['evoked'].xaxis.set_major_locator(loc)
ax['evoked'].yaxis.set_major_locator(AutoLocator())
# draw the colorbar
if colorbar:
from matplotlib.pyplot import colorbar as cbar
this_colorbar = cbar(im, cax=ax['colorbar'])
this_colorbar.ax.set_ylabel(unit, rotation=270, labelpad=12)
if cmap[1]:
ax_im.CB = DraggableColorbar(this_colorbar, im)
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
tight_layout(fig=fig)
# finish
plt_show(show)
return fig
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown subj',
color=(0.8, 0.8, 0.8), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs.
Parameters
----------
drop_log : list of list
Epoch drop log from Epochs.drop_log.
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str | None
The subject name to use in the title of the plot. If ``None``, do not
display a subject name.
.. versionchanged:: 0.23
Added support for ``None``.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
from ..epochs import _drop_log_stats
percent = _drop_log_stats(drop_log, ignore)
if percent < threshold:
logger.info('Percent dropped epochs < supplied threshold; not '
'plotting drop log.')
return
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
counts = np.array(list(scores.values()))
# init figure, handle easy case (no drops)
fig, ax = plt.subplots()
title = f'{percent:.1f}% of all epochs rejected'
if subject is not None:
title = f'{subject}: {title}'
ax.set_title(title)
if len(ch_names) == 0:
ax.text(0.5, 0.5, 'No drops', ha='center', fontsize=14)
return fig
# count epochs that aren't fully caught by `ignore`
n_used = sum([any(ch not in ignore for ch in d) or len(d) == 0
for d in drop_log])
# calc plot values
n_bars = min(n_max_plot, len(ch_names))
x = np.arange(n_bars)
y = 100 * counts / n_used
order = np.flipud(np.argsort(y))
ax.bar(x, y[order[:n_bars]], color=color, width=width, align='center')
ax.set_xticks(x)
ax.set_xticklabels(ch_names[order[:n_bars]], rotation=45, size=10,
horizontalalignment='right')
ax.set_ylabel('% of epochs rejected')
ax.grid(axis='y')
tight_layout(pad=1, fig=fig)
plt_show(show)
return fig
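# Illustrative usage sketch (``epochs`` is assumed to exist; the rejection
# threshold is an arbitrary example value):
#
#   epochs.drop_bad(reject=dict(eeg=150e-6))
#   fig = plot_drop_log(epochs.drop_log, threshold=0., n_max_plot=20,
#                       subject='sub-01')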
@fill_doc
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_color=None,
order=None, show=True, block=False, decim='auto',
noise_cov=None, butterfly=False, show_scrollbars=True,
epoch_colors=None, event_id=None, group_by='type'):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
The epochs object.
%(picks_good_data)s
scalings : dict | 'auto' | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded,
a subset of epochs up to 100 Mb will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4,
whitened=10.)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
events : None | array, shape (n_events, 3)
Events to show with vertical bars. You can use `~mne.viz.plot_events`
as a legend for the colors. By default, the coloring scheme is the
same. Defaults to ``None``.
.. warning:: If the epochs have been resampled, the events no longer
align with the data.
.. versionadded:: 0.14.0
%(event_color)s
Defaults to ``None``.
order : array of str | None
Order in which to plot channel types.
.. versionadded:: 0.18.0
show : bool
Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
the decimation that results in a sampling rate at least three times
larger than ``info['lowpass']`` (e.g., a 40 Hz lowpass will result in
at least a 120 Hz displayed sample rate).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling,
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
butterfly : bool
Whether to directly call the butterfly view.
.. versionadded:: 0.18.0
%(show_scrollbars)s
epoch_colors : list of (n_epochs) list (of n_channels) | None
Colors to use for individual epochs. If None, use default colors.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and associated event
integers as values. Useful when ``events`` contains event numbers not
present in ``epochs.event_id`` (e.g., because of event subselection).
Values in ``event_id`` will take precedence over those in
``epochs.event_id`` when there are overlapping keys.
.. versionadded:: 0.20
%(browse_group_by)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can be used to navigate between
channels and epochs and the scaling can be adjusted with - and + (or =)
keys, but this depends on the backend matplotlib is configured to use
(e.g., mpl.use(``TkAgg``) should work). Full screen mode can be toggled
with f11 key. The amount of epochs and channels per view can be adjusted
with home/end and page down/page up keys. These can also be set through
options dialog by pressing ``o`` key. ``h`` key plots a histogram of
peak-to-peak values along with the used rejection thresholds. Butterfly
plot can be toggled with ``b`` key. Right mouse click adds a vertical line
to the plot. Click 'help' button at bottom left corner of the plotter to
view all the options.
.. versionadded:: 0.10.0
"""
from ._figure import _browse_figure
epochs.drop_bad()
info = epochs.info.copy()
sfreq = info['sfreq']
projs = info['projs']
projs_on = np.full_like(projs, epochs.proj, dtype=bool)
if not epochs.proj:
info['projs'] = list()
# handle defaults / check arg validity
color = _handle_default('color', None)
scalings = _compute_scalings(scalings, epochs)
scalings = _handle_default('scalings_plot_raw', scalings)
if scalings['whitened'] == 'auto':
scalings['whitened'] = 1.
units = _handle_default('units', None)
unit_scalings = _handle_default('scalings', None)
decim, picks_data = _handle_decim(epochs.info.copy(), decim, None)
noise_cov = _check_cov(noise_cov, epochs.info)
event_id_rev = {v: k for k, v in (event_id or {}).items()}
_check_option('group_by', group_by,
('selection', 'position', 'original', 'type'))
# validate epoch_colors
_validate_type(epoch_colors, (list, None), 'epoch_colors')
if epoch_colors is not None:
if len(epoch_colors) != len(epochs.events):
msg = ('epoch_colors must have length equal to the number of '
f'epochs ({len(epochs)}); got length {len(epoch_colors)}.')
raise ValueError(msg)
for ix, this_colors in enumerate(epoch_colors):
_validate_type(this_colors, list, f'epoch_colors[{ix}]')
if len(this_colors) != len(epochs.ch_names):
msg = (f'epoch colors for epoch {ix} has length '
f'{len(this_colors)}, expected {len(epochs.ch_names)}.')
raise ValueError(msg)
# handle time dimension
n_epochs = min(n_epochs, len(epochs))
n_times = len(epochs) * len(epochs.times)
duration = n_epochs * len(epochs.times) / sfreq
# NB: this includes start and end of data:
boundary_times = np.arange(len(epochs) + 1) * len(epochs.times) / sfreq
# events
if events is not None:
event_nums = events[:, 2]
event_samps = events[:, 0]
epoch_n_samps = len(epochs.times)
# handle overlapping epochs (each event may show up in multiple places)
boundaries = (epochs.events[:, [0]] + np.array([-1, 1])
* epochs.time_as_index(0))
in_bounds = np.logical_and(boundaries[:, [0]] <= event_samps,
event_samps < boundaries[:, [1]])
event_ixs = [np.nonzero(a)[0] for a in in_bounds.T]
warned = False
event_times = list()
event_numbers = list()
for samp, num, _ixs in zip(event_samps, event_nums, event_ixs):
relevant_epoch_events = epochs.events[:, 0][_ixs]
if len(relevant_epoch_events) > 1 and not warned:
logger.info('You seem to have overlapping epochs. Some event '
'lines may be duplicated in the plot.')
warned = True
offsets = samp - relevant_epoch_events + epochs.time_as_index(0)
this_event_times = (_ixs * epoch_n_samps + offsets) / sfreq
event_times.extend(this_event_times)
event_numbers.extend([num] * len(_ixs))
event_nums = np.array(event_numbers)
event_times = np.array(event_times)
else:
event_nums = None
event_times = None
event_color_dict = _make_event_color_dict(event_color, events, event_id)
# determine trace order
picks = _picks_to_idx(info, picks)
n_channels = min(n_channels, len(picks))
ch_names = np.array(epochs.ch_names)
ch_types = np.array(epochs.get_channel_types())
order = _get_channel_plotting_order(order, ch_types, picks)
selections = None
if group_by in ('selection', 'position'):
selections = _setup_channel_selections(epochs, group_by, order)
order = np.concatenate(list(selections.values()))
default_selection = list(selections)[0]
n_channels = len(selections[default_selection])
# generate window title
if title is None:
title = epochs._name
if title is None or len(title) == 0:
title = 'Epochs'
elif not isinstance(title, str):
raise TypeError(f'title must be None or a string, got a {type(title)}')
params = dict(inst=epochs,
info=info,
n_epochs=n_epochs,
# channels and channel order
ch_names=ch_names,
ch_types=ch_types,
ch_order=order,
picks=order[:n_channels],
n_channels=n_channels,
picks_data=picks_data,
group_by=group_by,
ch_selections=selections,
# time
t_start=0,
duration=duration,
n_times=n_times,
first_time=0,
time_format='float',
decim=decim,
boundary_times=boundary_times,
# events
event_id_rev=event_id_rev,
event_color_dict=event_color_dict,
event_nums=event_nums,
event_times=event_times,
# preprocessing
projs=projs,
projs_on=projs_on,
apply_proj=epochs.proj,
remove_dc=True,
filter_coefs=None,
filter_bounds=None,
noise_cov=noise_cov,
use_noise_cov=noise_cov is not None,
# scalings
scalings=scalings,
units=units,
unit_scalings=unit_scalings,
# colors
ch_color_bad=(0.8, 0.8, 0.8),
ch_color_dict=color,
epoch_color_bad=(1, 0, 0),
epoch_colors=epoch_colors,
# display
butterfly=butterfly,
clipping=None,
scrollbars_visible=show_scrollbars,
scalebars_visible=False,
window_title=title,
xlabel='Epoch number')
fig = _browse_figure(**params)
fig._update_picks()
# make channel selection dialog, if requested (doesn't work well in init)
if group_by in ('selection', 'position'):
fig._create_selection_fig()
fig._update_projector()
fig._update_trace_offsets()
fig._update_data()
fig._draw_traces()
plt_show(show, block=block)
return fig
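# Illustrative usage sketch (``epochs`` is assumed to exist; the scaling value
# is an arbitrary example). Blocking keeps the browser open so that epochs
# marked bad by hand are dropped when the window is closed:
#
#   fig = epochs.plot(n_epochs=10, n_channels=30,
#                     scalings=dict(eeg=50e-6), block=True)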
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
xscale='linear', area_mode='std', area_alpha=0.33,
dB=True, estimate='auto', show=True, n_jobs=1,
average=False, line_alpha=None, spatial_colors=True,
sphere=None, exclude='bads', verbose=None):
"""%(plot_psd_doc)s.
Parameters
----------
epochs : instance of Epochs
The epochs object.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
%(plot_psd_picks_good_data)s
ax : instance of Axes | None
Axes to plot into. If None, axes will be created.
%(plot_psd_color)s
%(plot_psd_xscale)s
%(plot_psd_area_mode)s
%(plot_psd_area_alpha)s
%(plot_psd_dB)s
%(plot_psd_estimate)s
%(show)s
%(n_jobs)s
%(plot_psd_average)s
%(plot_psd_line_alpha)s
%(plot_psd_spatial_colors)s
%(topomap_sphere_auto)s
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the bad channels
are excluded. Pass an empty list to plot all channels (including
channels marked "bad", if any).
.. versionadded:: 0.24.0
%(verbose)s
Returns
-------
fig : instance of Figure
Figure with frequency spectra of the data channels.
"""
from ._figure import _psd_figure
# generate figure
# epochs always use multitaper, not Welch, so no need to allow "window"
# param above
fig = _psd_figure(
inst=epochs, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB,
average=average, estimate=estimate, area_mode=area_mode,
line_alpha=line_alpha, area_alpha=area_alpha, color=color,
spatial_colors=spatial_colors, n_jobs=n_jobs, bandwidth=bandwidth,
adaptive=adaptive, low_bias=low_bias, normalization=normalization,
window='hamming', exclude=exclude)
plt_show(show)
return fig
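# Illustrative usage sketch (``epochs`` is assumed to exist): multitaper PSD of
# the EEG channels, averaged across channels:
#
#   fig = epochs.plot_psd(fmin=1., fmax=40., picks='eeg', average=True,
#                         bandwidth=4.)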
| bsd-3-clause |
jdrudolph/scikit-bio | setup.py | 6 | 4944 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import platform
import re
import ast
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as _build_ext
# Bootstrap setup.py with numpy
# Huge thanks to coldfix's solution
# http://stackoverflow.com/a/21621689/579416
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('skbio/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
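# Illustrative sketch of what the regex + literal_eval above do, assuming
# skbio/__init__.py contains a line such as ``__version__ = '0.4.0'``:
#
#   _version_re.search("__version__ = '0.4.0'").group(1)  # -> "'0.4.0'"
#   ast.literal_eval("'0.4.0'")                            # -> '0.4.0'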
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
'resources for bioinformatics.')
with open('README.rst') as f:
long_description = f.read()
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# Wolfgang Maier (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
ssw_extra_compile_args.append('-msse2')
extensions = [
Extension("skbio.stats.__subsample",
["skbio/stats/__subsample" + ext]),
Extension("skbio.alignment._ssw_wrapper",
["skbio/alignment/_ssw_wrapper" + ext,
"skbio/alignment/_lib/ssw.c"],
extra_compile_args=ssw_extra_compile_args)
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
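# Illustrative sketch: regenerating the C sources from the .pyx files is opt-in
# via the USE_CYTHON environment variable, e.g. (shell):
#
#   USE_CYTHON=1 python setup.py build_ext --inplace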
setup(name='scikit-bio',
version=version,
license='BSD',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="[email protected]",
maintainer="scikit-bio development team",
maintainer_email="[email protected]",
url='http://scikit-bio.org',
test_suite='nose.collector',
packages=find_packages(),
ext_modules=extensions,
cmdclass={'build_ext': build_ext},
setup_requires=['numpy >= 1.9.2'],
install_requires=[
'bz2file >= 0.98',
'CacheControl[FileCache] >= 0.11.5',
'contextlib2 >= 0.4.0',
'decorator >= 3.4.2',
'future >= 0.14.3',
'IPython >= 3.2.0',
'matplotlib >= 1.4.3',
'natsort >= 4.0.3',
'numpy >= 1.9.2',
'pandas >= 0.16.2',
'scipy >= 0.15.1',
'six >= 1.9.0'
],
extras_require={'test': ["HTTPretty", "nose", "pep8", "flake8",
"python-dateutil", "check-manifest"],
'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
classifiers=classifiers,
package_data={
'skbio.diversity.alpha.tests': ['data/qiime-191-tt/*'],
'skbio.diversity.beta.tests': ['data/qiime-191-tt/*'],
'skbio.io.tests': ['data/*'],
'skbio.io.format.tests': ['data/*'],
'skbio.stats.tests': ['data/*'],
'skbio.stats.distance.tests': ['data/*'],
'skbio.stats.ordination.tests': ['data/*']
}
)
| bsd-3-clause |
mfittere/SixDeskDB | old/danilo/DA_FullStat_v2.py | 2 | 8226 | #!/usr/bin/python
# python re-implementation of read10b.f done by Danilo Banfi ([email protected])
# This computes the DA starting from the local .db produced by CreateDB.py
# Things that need to be edited by hand are indicated below.
# You only have to provide the name of the study <study_name>, e.g.
# python CreateDB.py <write_your_fancy_study_name_here>
# DA results will be written to file DA_<study_name>.txt with the usual meaning for all seeds
# In file DA_<study_name>_summary.txt you will find study,angle,min,mean,max,nega,Amin,Amax of
# lost1, as in the old .plot file
#
# NOTE: please use python version >=2.6
import sys
import getopt
from sixdesk import *
import numpy as np
import math
import matplotlib.pyplot as plt
# PART TO BE EDITED ========================================================================
Elhc=2.5 #normalized emittance as in "general input"
Einj=7460.5 #gamma as in "general input"
workarea='/afs/cern.ch/user/d/dbanfi/SixTrack_NEW' #where input db is, and where output will be written
# DO NOT EDIT BEYOND HERE IF YOU'RE NOT REALLY SURE =======================================
rectype=[('study','S100'),('seed','int'),('betx' ,'float'),('bety' ,'float'),('sigx1' ,'float'),('sigy1' ,'float'),('emitx' ,'float'),('emity' ,'float'),
('sigxavgnld' ,'float') ,('sigyavgnld' ,'float'),('betx2' ,'float'),('bety2' ,'float'),('distp' ,'float'),('dist' ,'float'),
('sturns1' ,'int') ,('sturns2' ,'int') ,('turn_max','int') ,('amp1' ,'float'),('amp2' ,'float'),('angle' ,'float')]
names='study,seed,betx,bety,sigx1,sigy1,emitx,emity,sigxavgnld,sigyavgnld,betx2,bety2,distp,dist,sturns1,sturns2,turn_max,amp1,amp2,angle'
outtype=[('study','S100'),('seed','int'),('angle','float'),('achaos','float'),('achaos1','float'),('alost1','float'),('alost2','float'),('Amin','float'),('Amax','float')]
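# Illustrative sketch: the per-seed rows written to DA_<study_name>.txt below can
# be read back with this dtype ('mystudy' is a placeholder study name):
#
#   rows = np.genfromtxt('DA_mystudy.txt', dtype=outtype)
#   print rows['seed'], rows['angle'], rows['alost1']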
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
print "use: DA_FullStat_public <study_name>"
sys.exit(0)
if len(args)<1 :
print "too few options: please provide <study_name>"
sys.exit()
if len(args)>1 :
print "too many options: please provide only <study_name>"
sys.exit()
database='%s/%s.db'%(workarea,args[0])
if os.path.isfile(database):
sd=SixDeskDB(database)
else:
print "ERROR: file %s does not exists!" %(database)
sys.exit()
f = open('DA_%s.txt'%args[0], 'w')
tmp=np.array(sd.execute('SELECT DISTINCT %s FROM results'%names),dtype=rectype)
for angle in np.unique(tmp['angle']):
for seed in np.unique(tmp['seed']):
ich1 = 0
ich2 = 0
ich3 = 0
icount = 1.
itest = 0
iin = -999
iend = -999
alost1 = 0.
alost2 = 0.
achaos = 0
achaos1 = 0
mask=[(tmp['betx']>0) & (tmp['emitx']>0) & (tmp['bety']>0) & (tmp['emity']>0) & (tmp['angle']==angle) & (tmp['seed']==seed)]
inp=tmp[mask]
if inp.size<2 :
                print 'not enough data for angle = %s' %angle
break
zero = 1e-10
for itest in range(0,inp.size):
if inp['betx'][itest]>zero and inp['emitx'][itest]>zero : inp['sigx1'][itest] = math.sqrt(inp['betx'][itest]*inp['emitx'][itest])
if inp['bety'][itest]>zero and inp['emity'][itest]>zero : inp['sigy1'][itest] = math.sqrt(inp['bety'][itest]*inp['emity'][itest])
if inp['betx'][itest]>zero and inp['emitx'][itest]>zero and inp['bety'][itest]>zero and inp['emity'][itest]>zero: itest+=1
iel=inp.size-1
rat=0
if inp['sigx1'][0]>0:
rat=pow(inp['sigy1'][0],2)*inp['betx'][0]/(pow(inp['sigx1'][0],2)*inp['bety'][0])
if pow(inp['sigx1'][0],2)*inp['bety'][0]<pow(inp['sigy1'][0],2)*inp['betx'][0]:
rat=2
if inp['emity'][0]>inp['emitx'][0]:
rat=0
dummy=np.copy(inp['betx'])
inp['betx']=inp['bety']
inp['bety']=dummy
dummy=np.copy(inp['betx2'])
inp['betx2']=inp['bety2']
inp['bety2']=dummy
dummy=np.copy(inp['sigx1'])
inp['sigx1']=inp['sigy1']
inp['sigy1']=dummy
dummy=np.copy(inp['sigxavgnld'])
inp['sigxavgnld']=inp['sigyavgnld']
inp['sigyavgnld']=dummy
dummy=np.copy(inp['emitx'])
inp['emitx']=inp['emity']
inp['emity']=dummy
sigma=math.sqrt(inp['betx'][0]*Elhc/Einj)
if abs(inp['emity'][0])>0 and abs(inp['sigx1'][0])>0:
if abs(inp['emitx'][0])<zero :
rad=math.sqrt(1+(pow(inp['sigy1'][0],2)*inp['betx'][0])/(pow(inp['sigx1'][0],2)*inp['bety'][0]))/sigma
else:
rad=math.sqrt((abs(inp['emitx'][0])+abs(inp['emity'][0]))/abs(inp['emitx'][0]))/sigma
if abs(inp['sigxavgnld'][0])>zero and abs(inp['bety'][0])>zero:
if abs(inp['emitx'][0]) < zero :
rad1=math.sqrt(1+(pow(inp['sigyavgnld'][0],2)*inp['betx'][0])/(pow(inp['sigxavgnld'][0],2)*inp['bety'][0]))/sigma
else:
rad1=(inp['sigyavgnld'][0]*math.sqrt(inp['betx'][0])-inp['sigxavgnld'][0]*math.sqrt(inp['bety2'][0]))/(inp['sigxavgnld'][0]*math.sqrt(inp['bety'][0])-inp['sigyavgnld'][0]*math.sqrt(inp['betx2'][0]))
rad1=math.sqrt(1+rad1*rad1)/sigma
else:
rad1 = 1
for i in range(0,iel+1):
if ich1 == 0 and (inp['distp'][i] > 2. or inp['distp'][i]<=0.5):
ich1 = 1
achaos=rad*inp['sigx1'][i]
iin=i
if ich3 == 0 and inp['dist'][i] > 1e-2 :
ich3=1
iend=i
achaos1=rad*inp['sigx1'][i]
if ich2 == 0 and (inp['sturns1'][i]<inp['turn_max'][i] or inp['sturns2'][i]<inp['turn_max'][i]):
ich2 = 1
alost2 = rad*inp['sigx1'][i]
if iin != -999 and iend == -999 : iend=iel
if iin != -999 and iend >= iin :
for i in range(iin,iend+1) :
alost1 += (rad1/rad) * (inp['sigxavgnld'][i]/inp['sigx1'][i])
alost1 = alost1/(float(iend)-iin+1)
if alost1 >= 1.1 or alost1 <= 0.9: alost1= -1. * alost1
else:
alost1 = 1.0
alost1=alost1*alost2
# print "study=%s seed=%s angle = %s achaos= %s achaos1= %s alost1= %s alost2= %s rad*sigx1[1]= %s rad*sigx1[iel]= %s" %(args[0],seed,angle,achaos,achaos1,alost1,alost2,rad*inp['sigx1'][0],rad*inp['sigx1'][iel])
f.write('%s %s %s %s %s %s %s %s %s \n'%(args[0],seed,angle,achaos,achaos1,alost1,alost2,rad*inp['sigx1'][0],rad*inp['sigx1'][iel]))
f.close()
f = open('DA_%s.txt'%args[0], 'r')
final=np.genfromtxt(f,dtype=outtype)
f.close()
f1 = open('DA_%s_summary.txt'%args[0], 'w')
for angle in np.unique(final['angle']):
study=final['study'][0]
mini = np.min(np.abs(final['alost1'][(final['angle']==angle)]))
mean =np.mean(np.abs(final['alost1'][(final['angle']==angle)&(final['alost1']!=0)]))
maxi = np.max(np.abs(final['alost1'][(final['angle']==angle)]))
nega = len(final['alost1'][(final['angle']==angle)&(final['alost1']<0)])
Amin = np.min(final['Amin'][final['angle']==angle])
Amax = np.max(final['Amax'][final['angle']==angle])
print study, angle, mini , mean, maxi,nega , Amin, Amax
f1.write('%s %.2f %.2f %.2f %.2f %.0f %.2f %.2f \n'%(study,angle, mini , mean, maxi,nega , Amin, Amax))
f1.close()
if __name__ == "__main__":
main()
| lgpl-2.1 |
mjgrav2001/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
Mistobaan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 13 | 20278 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
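# Illustrative sketch of the helpers above: build a small synthetic clustering
# problem (the sizes are arbitrary example values):
#
#   centers = make_random_centers(num_centers=3, num_dims=2)
#   points, assignments, offsets = make_random_points(centers, num_points=100)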
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
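  # Illustrative sketch: the closure returned above is what the tests below feed
  # to the estimator, e.g.
  #
  #   kmeans.fit(input_fn=self.input_fn(batch_size=32, num_epochs=1), steps=10)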
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
# Test predict
assignments = list(
kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
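# The expected transform is the squared Euclidean distance from each point to
# each center, expanded as ||x||^2 - 2*x.c + ||c||^2 and clipped at zero to
# guard against small negative values from round-off.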
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1,
keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, num_points=10)
self._infer_helper(kmeans, clusters, num_points=1)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.fit(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(
self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
jereze/scikit-learn | sklearn/neighbors/unsupervised.py | 117 | 4755 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
wavelets/BayesDataAnalysisWithPyMC | BayesDataAnalysisWithPymc/BernTwoPyMC.py | 2 | 2263 | # -*- coding: utf-8 -*-
''' Model for inferring two binomial proportions via MCMC.
Python (PyMC) adaptation of the R code from "Doing Bayesian Data Analysis",
by John K. Kruschke.
More info: http://doingbayesiandataanalysis.blogspot.com.br/
'''
from __future__ import division
import pymc
from matplotlib import pyplot as plot
from plot_post import plot_post
# TODO: It would be good to import data from CSV files.
# Model specification in PyMC goes backwards, in comparison to JAGS:
# first the prior are specified, THEN the likelihood function.
# TODO: With PyMC, it's possible to define many stochastic variables
# in just one variable name using the 'size' function parameter.
# But for now, I will use multiple variable names for simplicity.
theta1 = pymc.Beta('theta1', alpha=3, beta=3)
theta2 = pymc.Beta('theta2', alpha=3, beta=3)
# Define the observed data.
data = [[1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]]
# Define the likelihood function for the observed data.
like1 = pymc.Bernoulli('like1', theta1, observed=True, value=data[0])
like2 = pymc.Bernoulli('like2', theta2, observed=True, value=data[1])
# Use the PyMC 'Model' class to collect all the variables we are interested in.
model = pymc.Model([theta1, theta2])
# And instantiate the MCMC class to sample the posterior.
mcmc = pymc.MCMC(model)
mcmc.sample(40000, 10000, 1)
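# PyMC2's sample(iter, burn, thin): 40000 iterations, the first 10000
# discarded as burn-in, and no thinning.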
# Use PyMC built-in plot function to show graphs of the samples.
# pymc.Matplot.plot(mcmc)
# plot.show()
# Let's try plotting using Matplotlib's 'pyplot'.
# First, we extract the traces for the parameters of interest.
theta1_samples = mcmc.trace('theta1')[:]
theta2_samples = mcmc.trace('theta2')[:]
theta_diff = theta2_samples - theta1_samples
# Then, we plot a histogram of their individual sample values.
plot.figure(figsize=(8.0, 10))
plot.subplot(311)
plot_post(theta1_samples, title=r'Posterior of $\theta_1$')
plot.subplot(312)
plot_post(theta2_samples, title=r'Posterior of $\theta_2$')
plot.subplot(313)
plot_post(theta_diff, title=r'Posterior of $\Delta\theta$', comp=0.0)
plot.subplots_adjust(hspace=0.5)
plot.show()
| mit |
crunchbang/Machine_Perception-DS863 | Assignment_1/src/question_7.py | 2 | 1036 | import cv2
import numpy as np
from matplotlib import pyplot as plt
orig = cv2.imread("lenna.jpg", cv2.IMREAD_GRAYSCALE)
noisy_img = orig.copy()
# add salt and pepper noise
# There are multiple ways to do it, this being one of them
# choose a random value in the range 0 - 0.05, the
# probablity of there being noise in a pixel
p = np.random.uniform(0, 0.05)
# create a noise matrix of the same dimension as the image with
# values uniformly distributed in the range [0, 1)
rand_noise = np.random.rand(*orig.shape)
# add noise (make the pixel black or white) at locations of the original
# image where the conditions are satisfied
noisy_img[rand_noise < p] = 0
noisy_img[rand_noise > 1 - p] = 255
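# A small (3x3) median filter is well suited to salt-and-pepper noise: it
# discards the extreme 0/255 outliers while preserving edges better than a
# mean filter would.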
filtered_img = cv2.medianBlur(noisy_img, 3)
fig = plt.figure()
images = [orig, noisy_img, filtered_img]
titles = ["Original", "Salt and Pepper noise", "filtered image"]
for i in range(len(images)):
ax = fig.add_subplot(2, 2, i + 1)
ax.set_title(titles[i])
ax.imshow(images[i], cmap="gray")
plt.axis("off")
plt.show()
| mit |
ritviksahajpal/LUH2 | LUH2/GlobalCropRotations/crop_rotations.py | 1 | 9833 | import itertools
import logging
import os
import re
import sys
import pdb
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import pygeoutil.util as util
import constants
import crop_stats
import plots
# Logging
cur_flname = os.path.splitext(os.path.basename(__file__))[0]
LOG_FILENAME = constants.log_dir + os.sep + 'Log_' + cur_flname + '.txt'
util.make_dir_if_missing(constants.log_dir)
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, filemode='w',
format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt="%m-%d %H:%M") # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL
# Output to screen
logger = logging.getLogger(cur_flname)
logger.addHandler(logging.StreamHandler(sys.stdout))
class CropRotations:
def __init__(self):
self.name_country_col = 'Country_FAO'
self.cft_type = 'functional crop type'
@staticmethod
def get_list_decades(lyrs):
"""
Convert a list of years to lists of list of years
FROM: [1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977]
TO: [[1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969],
[1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977]]
:param lyrs:
:return:
"""
return np.array([list(g) for k, g in itertools.groupby(lyrs, lambda i: i // 10)])
@staticmethod
def get_list_yrs(df, already_processed=False):
"""
From an input dataframe, get the list of years. The years could be in the form of columns labeled
Y1961... or in the form of 1961....
:param df:
:param already_processed:
:return:
"""
if already_processed:
vals = df.columns[df.columns.astype(str).str.contains(r'\d{4}$')].values
else:
# Create a list of columns of the form Y1961...remove the 'Y' and return list of integers
years = df.filter(regex=r'^Y\d{4}$').columns.values
vals = [y[1:] for y in years]
return map(int, vals)
def select_data_by_country(self, df, country, name_column):
"""
Select data for a country/region by country code or name
:param df:
:param country:
:param name_column:
:return:
"""
df_country = df[df[name_column] == country]
return df_country
def rename_years(self, col_name):
"""
If col_name is of the form Y1961 then return 1961,
If col_name is like 1961 then return 1961
:param col_name:
:return:
"""
if re.match(r'^Y\d{4}', col_name):
return int(col_name[1:])
else:
return col_name
def per_CFT_by_decade(self, df, cnt_name, already_processed=False):
"""
Aggregate years to decades and compute fraction
of each crop functional type in that decade
:param df:
:param cnt_name:
:param already_processed:
:return:
"""
dec_df = pd.DataFrame()
# Get list of years in FAO data
list_yrs = CropRotations.get_list_yrs(df, already_processed)
if not already_processed:
# renaming columns for years so that they do not start with a 'Y'
print self.rename_years
df.rename(columns=self.rename_years, inplace=True)
# Separate years into decades
yrs_dec = CropRotations.get_list_decades(list_yrs)
# Select data by country
out_df = self.select_data_by_country(df, cnt_name, name_column=self.name_country_col)
for dec in yrs_dec:
dec_name = str(util.round_closest(dec[0])) + 's'
total_ar = np.sum(out_df.ix[:, dec].values)
dec_df[dec_name] = out_df.ix[:, dec].sum(axis=1)/total_ar * 100
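# Each dec_df column now holds, for this country, the percentage of the
# decade's total crop area contributed by each crop functional type row.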
# Join the decadal dataframe with country and crop functional type name columns
dec_df = pd.concat([out_df[[self.name_country_col, self.cft_type]], dec_df], axis=1, join='inner')
return dec_df
def per_CFT_annual(self, df, cnt_name, already_processed=False):
"""
Convert a dataframe containing cropland areas by CFT for each country into percentage values
:param df:
:param cnt_name:
:param already_processed:
:return:
"""
per_df = pd.DataFrame()
# Select data by country
out_df = self.select_data_by_country(df, cnt_name, name_column=self.name_country_col)
# Get list of years in FAO data
list_yrs = CropRotations.get_list_yrs(out_df, already_processed)
for yr in list_yrs:
grp_df = out_df.groupby([self.name_country_col, self.cft_type]).agg({yr: 'sum'})
pct_df = grp_df.groupby(level=0).apply(lambda x: 100*x/float(x.sum()))
per_df = pd.concat([per_df, pct_df], axis=1, join='inner')
return per_df
def diff_ann_decadal(self):
pass
def call_R(self):
pass
def read_processed_FAO_data(self):
"""
Read in data on FAO crop acreages globally (already processed)
:return:
"""
fao_file = util.open_or_die(constants.data_dir + os.sep + constants.FAO_FILE)
return fao_file.parse(constants.FAO_SHEET)
def plot_cnt_decade(self, inp_fao_df, cnt, already_processed=False):
"""
Plot percentage of cropland area occupied by each crop functional type for a country
:param inp_fao_df:
:param cnt:
:param already_processed:
:return:
"""
out_dec_df = self.per_CFT_by_decade(inp_fao_df, cnt, already_processed)
out_dec_df = out_dec_df.set_index(self.cft_type)
ax = out_dec_df.drop(self.name_country_col, axis=1).T.\
plot(kind='bar', stacked=True, color=plots.get_colors(palette='tableau'), linewidth=0)
plots.simple_axis(ax) # Simple axis, no axis on top and right of plot
# Transparent legend in lower left corner
leg = plt.legend(loc='lower left', fancybox=None)
leg.get_frame().set_linewidth(0.0)
leg.get_frame().set_alpha(0.5)
# Set X and Y axis labels and title
ax.set_title(cnt)
ax.set_xlabel('')
plt.ylim(ymax=100)
ax.set_ylabel('Percentage of cropland area \noccupied by each crop functional type')
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
yticks = mtick.FormatStrFormatter(fmt)
ax.yaxis.set_major_formatter(yticks)
# remove ticks from X axis
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off') # ticks along the top edge are off
# Rotate the X axis labels to be horizontal
locs, labels = plt.xticks()
plt.setp(labels, rotation=0)
plt.tight_layout()
plt.savefig(constants.out_dir + os.sep + cnt + '.png', bbox_inches='tight', dpi=600)
plt.close()
def plot_cnt_mean_decade(self, inp_fao_df, cnt, already_processed=False):
"""
Plot mean crop functional type area in each decade
:param inp_fao_df:
:param cnt:
:param already_processed:
:return:
"""
out_dec_df = self.per_CFT_by_decade(inp_fao_df, cnt, already_processed)
out_dec_df = out_dec_df.set_index(self.cft_type)
ax = out_dec_df.drop(self.name_country_col, axis=1).T.\
plot(kind='bar', stacked=True, color=plots.get_colors(palette='tableau'), linewidth=0)
plots.simple_axis(ax) # Simple axis, no axis on top and right of plot
# Transparent legend in lower left corner
leg = plt.legend(loc='lower left', fancybox=None)
leg.get_frame().set_linewidth(0.0)
leg.get_frame().set_alpha(0.5)
# Set X and Y axis labels and title
ax.set_title(cnt)
ax.set_xlabel('')
plt.ylim(ymax=100)
ax.set_ylabel('Mean crop functional type area in each decade')
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
yticks = mtick.FormatStrFormatter(fmt)
ax.yaxis.set_major_formatter(yticks)
# remove ticks from X axis
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off') # ticks along the top edge are off
# Rotate the X axis labels to be horizontal
locs, labels = plt.xticks()
plt.setp(labels, rotation=0)
plt.tight_layout()
plt.savefig(constants.out_dir + os.sep + 'Mean_' + cnt + '.png', bbox_inches='tight', dpi=600)
plt.close()
def process_rotations(self):
cs = crop_stats.CropStats()
# 1. Read in data on raw FAO crop acreages globally.
# 2. Delete redundant data, data from continents/regions
# 3. Replace NaNs by 0
fao_df = cs.read_raw_FAO_data()
# Merge FAO data (raw or processed) with our crop functional type definitions
fao_df, grp_crp, grp_cnt, per_df = cs.merge_FAO_CFT(fao_df)
already_processed = False
list_countries = fao_df[self.name_country_col].unique()
for country in list_countries:
logger.info(country)
self.plot_cnt_decade(fao_df, country, already_processed)
if not already_processed:
already_processed = True
if __name__ == '__main__':
obj = CropRotations()
obj.process_rotations()
| mit |
bmazin/SDR | DataReadout/ReadoutControls/lib/make_image_v2.py | 2 | 5473 | # make_image.py
# 05/30/11 version 2 updated to make image as numpy array and return mplib figure to arcons quicklook
#from data2ascii import unpack_data
from PIL import Image
from PIL import ImageDraw
from numpy import *
import matplotlib
from matplotlib.pyplot import plot, figure, show, rc, grid
import matplotlib.pyplot as plt
#will actually need intermediate work to unpack these arrays from file and pass them in
def make_image(photon_count, median_energy, color_on = True, white_pixels = .10):
'''
Updated from 08/31/10 version. Image generation will happen on GUI machine now. organize_data
will be run on SDR to pass over binary file with arrays of each pixels photon count and median energy.
Those arrays will be unpacked in GUI image generation thread, combined into cumulative arrays if we
are doing an observation, then passes arrays of photon counts and energies to make_image
'''
array_rows = 32
array_cols = 32
total_pixels = array_rows * array_cols
print "Generating image"
im = Image.new("RGB",(array_cols,array_rows))
draw = ImageDraw.ImageDraw(im)
#to get better v gradient we want to saturate brightest 10% of pixels
#make histogram out of the lengths of each pixel. Histogram peak will be at the low end
#as most pixels will be dark, thus having small "lengths" for their photon lists.
hist_counts, hist_bins = histogram(photon_count, bins=100)
brightest_pixels = 0
nbrightestcounts = 0.0
q=1
#Build a cumulative distribution of the counts and find the brightness level above which
#the brightest `white_pixels` fraction of the total counts lies; that level becomes the
#maximum v value, so the pixels above it saturate and the rest are scaled relative to it.
#(The commented-out loop below is the older per-pixel version of the same idea.)
ncounts = float(sum(photon_count))
#print "ncounts ", ncounts
cdf = array(cumsum(hist_counts*hist_bins[:-1]),dtype = float32)
#print cdf
idx = (where(cdf > (ncounts*(1.0-white_pixels))))[0][0] #where cdf has 1-white_pixels percent of max number of counts
#print idx
vmax = hist_bins[idx]
#while float(nbrightestcounts/float(ncounts)) <= white_pixels:
#brightest_pixels += hist_bins[-q]
#nbrightestcounts += hist_counts[-q]
#q+=1
#if vmax == 0: #if vmax = 0 then no pixels are illuminated
#while vmax ==0: #check through brightest pixels until one is found
#q -= 1
#vmax = pixel_hist[1][-q]
for m in range(total_pixels):
try:
if median_energy[m] >= 3.1:
hue= 300
elif median_energy[m] <= 1.9:
hue= 0
else:
hue = int(((median_energy[m]-1.9)/(3.1-1.9))*300)
except ValueError:
hue = 150 #if median energy is NaN, that pixel has no photons, so set hue to green and v will be 0
#normalize number of photons in that pixel by vmax, then *80 to give brightness
try:
v = int((photon_count[m]/vmax)*80)
if v < 0:
v=0 #after sky subtraction we may get negative counts for some pixels
except ValueError:
v=0 #if v is NaN set v to 0
if color_on == True:
s=v #scale saturation with v so brightest pixels show most color, dimmer show less color
else:
s=0 #make image black and white if color is turned off
colorstring = "hsl(%i,%i%%,%i%%)" %(hue,s,v)
imx = m%(array_cols)
#to flip image vertically use: imy = m/array_cols
imy = (array_rows - 1) - m/(array_cols)
draw.point((imx,imy),colorstring)
return im
#10/5/10 added main portion so single binary data file can be turned into an image
if __name__ == "__main__":
file = raw_input("enter binary data file name: ")
newpixel, newtime, newenergy = unpack_data(file)
imagefile = raw_input("enter image file name to save data to: ")
obs = len(newenergy)
print "creating list of each pixel's photons"
each_pixels_photons = []
lengths = []
#generate empty list for pixels to have photons dumped into
for j in range(1024):
each_pixels_photons.append([])
#search through data and place energies in right pixels
for k in range(obs):
each_pixels_photons[newpixel[k]].append(newenergy[k])
for l in range(1024):
lengths.append(len(each_pixels_photons[l]))
print "Generating image"
im = Image.new("RGB",(32,32))
draw = ImageDraw.ImageDraw(im)
#to get better v distribution we want to saturate brightest 0.5% of pixels
pixel_hist = histogram(lengths, bins=100)
photon_sum=0
q=1
while photon_sum <=4:
photon_sum += pixel_hist[0][-q]
q+=1
vmax = pixel_hist[1][-q]
for m in range(1024):
#map the pixel's median energy (clipped to the range 1.9-3.1) onto a hue value between 0 and 300
median_energy = median(each_pixels_photons[m])
try:
if median_energy >= 3.1:
hue= 300
elif median_energy <= 1.9:
hue= 0
else:
hue = int(((median_energy-1.9)/(3.1-1.9))*300)
except ValueError:
hue = 150 #if median energy is NaN, that pixel has no photons, so set hue to green and v will be 0
#normalize number of photons in that pixel by vmax, then *80 to give brightness
try:
v = (len(each_pixels_photons[m])/vmax)*80
except ValueError:
v=0 #if v is NaN set v to 0
s=v #scale saturation with v so brightest pixels show most color, dimmer show less color
colorstring = "hsl(%i,%i%%,%i%%)" %(hue,s,v)
imx = m%(32)
#switch between two lines below to flip array vertically
#imy = m/array_cols
imy = (31) - m/(32)
#imy = m/(32)
draw.point((imx,imy),colorstring)
im.show()
| gpl-2.0 |
superbobry/pymc3 | pymc3/stats.py | 1 | 20493 | """Utility functions for PyMC"""
import numpy as np
import pandas as pd
import itertools
import sys
import warnings
from .model import modelcontext
from .backends import tracetab as ttab
__all__ = ['autocorr', 'autocov', 'dic', 'bpic', 'waic', 'hpd', 'quantiles', 'mc_error',
'summary', 'df_summary']
def statfunc(f):
"""
Decorator for statistical utility function to automatically
extract the trace array from whatever object is passed.
"""
def wrapped_f(pymc3_obj, *args, **kwargs):
try:
vars = kwargs.pop('vars', pymc3_obj.varnames)
chains = kwargs.pop('chains', pymc3_obj.chains)
except AttributeError:
# If fails, assume that raw data was passed.
return f(pymc3_obj, *args, **kwargs)
burn = kwargs.pop('burn', 0)
thin = kwargs.pop('thin', 1)
combine = kwargs.pop('combine', False)
## Remove outer level chain keys if only one chain
squeeze = kwargs.pop('squeeze', True)
results = {chain: {} for chain in chains}
for var in vars:
samples = pymc3_obj.get_values(var, chains=chains, burn=burn,
thin=thin, combine=combine,
squeeze=False)
for chain, data in zip(chains, samples):
results[chain][var] = f(np.squeeze(data), *args, **kwargs)
if squeeze and (len(chains) == 1 or combine):
results = results[chains[0]]
return results
wrapped_f.__doc__ = f.__doc__
wrapped_f.__name__ = f.__name__
return wrapped_f
@statfunc
def autocorr(x, lag=1):
"""Sample autocorrelation at specified lag.
The autocorrelation is the correlation of x_i with x_{i+lag}.
"""
S = autocov(x, lag)
return S[0, 1]/np.sqrt(np.prod(np.diag(S)))
@statfunc
def autocov(x, lag=1):
"""
Sample autocovariance at specified lag.
The autocovariance is a 2x2 matrix with the variances of
x[:-lag] and x[lag:] in the diagonal and the autocovariance
on the off-diagonal.
"""
x = np.asarray(x)
if not lag: return 1
if lag < 0:
raise ValueError("Autocovariance lag must be a positive integer")
return np.cov(x[:-lag], x[lag:], bias=1)
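# Illustrative sketch (not part of the original module): when handed a plain
# NumPy array, the @statfunc wrapper falls through to the raw computation, so
# the lag-1 autocorrelation of a chain `x` is simply
#
#     S = autocov(x, 1)                     # np.cov(x[:-1], x[1:], bias=1)
#     r1 = S[0, 1] / np.sqrt(S[0, 0] * S[1, 1])
#
# which is the same quantity returned by autocorr(x, 1).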
def dic(trace, model=None):
"""
Calculate the deviance information criterion of the samples in trace from model
Read more theory here - in a paper by some of the leading authorities on Model Selection - http://bit.ly/1W2YJ7c
"""
model = modelcontext(model)
transformed_rvs = [rv for rv in model.free_RVs if hasattr(rv.distribution, 'transform_used')]
if transformed_rvs:
warnings.warn("""
DIC estimates are biased for models that include transformed random variables.
See https://github.com/pymc-devs/pymc3/issues/789.
The following random variables are the result of transformations:
{}
""".format(', '.join(rv.name for rv in transformed_rvs)))
mean_deviance = -2 * np.mean([model.logp(pt) for pt in trace])
free_rv_means = {rv.name: trace[rv.name].mean(axis=0) for rv in model.free_RVs}
deviance_at_mean = -2 * model.logp(free_rv_means)
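# DIC = mean deviance + p_D, where the effective number of parameters
# p_D = mean deviance - deviance at the posterior mean; this simplifies to
# 2 * mean_deviance - deviance_at_mean below.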
return 2 * mean_deviance - deviance_at_mean
def waic(trace, model=None):
"""
Calculate the widely applicable information criterion (WAIC) of the samples in trace from model.
Read more theory here - in a paper by some of the leading authorities on Model Selection - http://bit.ly/1W2YJ7c
"""
model = modelcontext(model)
transformed_rvs = [rv for rv in model.free_RVs if hasattr(rv.distribution, 'transform_used')]
if transformed_rvs:
warnings.warn("""
WAIC estimates are biased for models that include transformed random variables.
See https://github.com/pymc-devs/pymc3/issues/789.
The following random variables are the result of transformations:
{}
""".format(', '.join(rv.name for rv in transformed_rvs)))
log_py = []
for obs in model.observed_RVs:
log_py.append([obs.logp_elemwise(pt) for pt in trace ])
log_py = np.hstack(log_py)
lppd = np.sum(np.log(np.mean(np.exp(log_py), axis=0)))
p_waic = np.sum(np.var(log_py, axis=0))
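# WAIC = -2 * (lppd - p_waic): lppd is the log pointwise predictive density
# and p_waic, the summed posterior variance of the pointwise log-likelihood,
# acts as the effective number of parameters.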
return -2 * lppd + 2 * p_waic
def bpic(trace, model=None):
"""
Calculate the Bayesian predictive information criterion (BPIC) of the samples in trace from model
Read more theory here - in a paper by some of the leading authorities on Model Selection - http://bit.ly/1W2YJ7c
"""
model = modelcontext(model)
transformed_rvs = [rv for rv in model.free_RVs if hasattr(rv.distribution, 'transform_used')]
if transformed_rvs:
warnings.warn("""
BPIC estimates are biased for models that include transformed random variables.
See https://github.com/pymc-devs/pymc3/issues/789.
The following random variables are the result of transformations:
{}
""".format(', '.join(rv.name for rv in transformed_rvs)))
mean_deviance = -2 * np.mean([model.logp(pt) for pt in trace])
free_rv_means = {rv.name: trace[rv.name].mean(axis=0) for rv in model.free_RVs}
deviance_at_mean = -2 * model.logp(free_rv_means)
return 3 * mean_deviance - 2 * deviance_at_mean
def make_indices(dimensions):
# Generates complete set of indices for given dimensions
level = len(dimensions)
if level == 1: return list(range(dimensions[0]))
indices = [[]]
while level:
_indices = []
for j in range(dimensions[level-1]):
_indices += [[j]+i for i in indices]
indices = _indices
level -= 1
try:
return [tuple(i) for i in indices]
except TypeError:
return indices
def calc_min_interval(x, alpha):
"""Internal method to determine the minimum interval of
a given width
Assumes that x is sorted numpy array.
"""
n = len(x)
cred_mass = 1.0-alpha
interval_idx_inc = int(np.floor(cred_mass*n))
n_intervals = n - interval_idx_inc
interval_width = x[interval_idx_inc:] - x[:n_intervals]
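# Each candidate interval [x[i], x[i + interval_idx_inc]] contains the
# required credible mass; the HPD interval is the narrowest of them.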
if len(interval_width) == 0:
raise ValueError('Too few elements for interval calculation')
min_idx = np.argmin(interval_width)
hdi_min = x[min_idx]
hdi_max = x[min_idx+interval_idx_inc]
return hdi_min, hdi_max
@statfunc
def hpd(x, alpha=0.05):
"""Calculate highest posterior density (HPD) of array for given alpha. The HPD is the
minimum width Bayesian credible interval (BCI).
:Arguments:
x : Numpy array
An array containing MCMC samples
alpha : float
Desired probability of type I error (defaults to 0.05)
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim > 1:
# Transpose first, then sort
tx = np.transpose(x, list(range(x.ndim))[1:]+[0])
dims = np.shape(tx)
# Container list for intervals
intervals = np.resize(0.0, dims[:-1]+(2,))
for index in make_indices(dims[:-1]):
try:
index = tuple(index)
except TypeError:
pass
# Sort trace
sx = np.sort(tx[index])
# Append to list
intervals[index] = calc_min_interval(sx, alpha)
# Transpose back before returning
return np.array(intervals)
else:
# Sort univariate node
sx = np.sort(x)
return np.array(calc_min_interval(sx, alpha))
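# Illustrative sketch (not part of the original module): hpd() also accepts a
# plain NumPy array thanks to the @statfunc fallback, e.g.
#
#     >>> samples = np.random.randn(5000)
#     >>> hpd(samples, alpha=0.05)          # roughly array([-1.96, 1.96])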
@statfunc
def mc_error(x, batches=5):
"""
Calculates the simulation standard error, accounting for non-independent
samples. The trace is divided into batches, and the standard deviation of
the batch means is calculated.
:Arguments:
x : Numpy array
An array containing MCMC samples
batches : integer
Number of batches
"""
if x.ndim > 1:
dims = np.shape(x)
#ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
trace = np.transpose([t.ravel() for t in x])
return np.reshape([mc_error(t, batches) for t in trace], dims[1:])
else:
if batches == 1: return np.std(x)/np.sqrt(len(x))
try:
batched_traces = np.resize(x, (batches, len(x)/batches))
except ValueError:
# If batches do not divide evenly, trim excess samples
resid = len(x) % batches
new_shape = (batches, (len(x) - resid) / batches)
batched_traces = np.resize(x[:-resid], new_shape)
means = np.mean(batched_traces, 1)
return np.std(means)/np.sqrt(batches)
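# Illustrative sketch (not part of the original module): for an uncorrelated
# chain the batch-means estimate approaches std(x) / sqrt(len(x)), e.g.
#
#     >>> x = np.random.randn(10000)
#     >>> mc_error(x, batches=5)            # roughly 0.01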
@statfunc
def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5)):
"""Returns a dictionary of requested quantiles from array
:Arguments:
x : Numpy array
An array containing MCMC samples
qlist : tuple or list
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim > 1:
# Transpose first, then sort, then transpose back
sx = np.sort(x.T).T
else:
# Sort univariate node
sx = np.sort(x)
try:
# Generate specified quantiles
quants = [sx[int(len(sx)*q/100.0)] for q in qlist]
return dict(zip(qlist, quants))
except IndexError:
print("Too few elements for quantile calculation")
def df_summary(trace, varnames=None, stat_funcs=None, extend=False,
alpha=0.05, batches=100):
"""Create a data frame with summary statistics.
Parameters
----------
trace : MultiTrace instance
varnames : list
Names of variables to include in summary
stat_funcs : None or list
A list of functions used to calculate statistics. By default,
the mean, standard deviation, simulation standard error, and
highest posterior density intervals are included.
The functions will be given one argument, the samples for a
variable as a 2 dimensional array, where the first axis
corresponds to sampling iterations and the second axis
represents the flattened variable (e.g., x__0, x__1,...). Each
function should return either
1) A `pandas.Series` instance containing the result of
calculating the statistic along the first axis. The name
attribute will be taken as the name of the statistic.
2) A `pandas.DataFrame` where each column contains the
result of calculating the statistic along the first axis.
The column names will be taken as the names of the
statistics.
extend : boolean
If True, use the statistics returned by `stat_funcs` in
addition to, rather than in place of, the default statistics.
This is only meaningful when `stat_funcs` is not None.
alpha : float
The alpha level for generating posterior intervals. Defaults
to 0.05. This is only meaningful when `stat_funcs` is None.
batches : int
Batch size for calculating standard deviation for
non-independent samples. Defaults to 100. This is only
meaningful when `stat_funcs` is None.
See also
--------
summary : Generate a pretty-printed summary of a trace.
Returns
-------
`pandas.DataFrame` with summary statistics for each variable
Examples
--------
>>> import pymc3 as pm
>>> trace.mu.shape
(1000, 2)
>>> pm.df_summary(trace, ['mu'])
mean sd mc_error hpd_5 hpd_95
mu__0 0.106897 0.066473 0.001818 -0.020612 0.231626
mu__1 -0.046597 0.067513 0.002048 -0.174753 0.081924
Other statistics can be calculated by passing a list of functions.
>>> import pandas as pd
>>> def trace_sd(x):
... return pd.Series(np.std(x, 0), name='sd')
...
>>> def trace_quantiles(x):
... return pd.DataFrame(pm.quantiles(x, [5, 50, 95]))
...
>>> pm.df_summary(trace, ['mu'], stat_funcs=[trace_sd, trace_quantiles])
sd 5 50 95
mu__0 0.066473 0.000312 0.105039 0.214242
mu__1 0.067513 -0.159097 -0.045637 0.062912
"""
if varnames is None:
varnames = trace.varnames
funcs = [lambda x: pd.Series(np.mean(x, 0), name='mean'),
lambda x: pd.Series(np.std(x, 0), name='sd'),
lambda x: pd.Series(mc_error(x, batches), name='mc_error'),
lambda x: _hpd_df(x, alpha)]
if stat_funcs is not None and extend:
stat_funcs = funcs + stat_funcs
elif stat_funcs is None:
stat_funcs = funcs
var_dfs = []
for var in varnames:
vals = trace.get_values(var, combine=True)
flat_vals = vals.reshape(vals.shape[0], -1)
var_df = pd.concat([f(flat_vals) for f in stat_funcs], axis=1)
var_df.index = ttab.create_flat_names(var, vals.shape[1:])
var_dfs.append(var_df)
return pd.concat(var_dfs, axis=0)
def _hpd_df(x, alpha):
cnames = ['hpd_{0:g}'.format(100 * alpha/2),
'hpd_{0:g}'.format(100 * (1 - alpha/2))]
return pd.DataFrame(hpd(x, alpha), columns=cnames)
def summary(trace, varnames=None, alpha=0.05, start=0, batches=100, roundto=3,
to_file=None):
"""
Generate a pretty-printed summary of the node.
:Parameters:
trace : Trace object
Trace containing MCMC sample
varnames : list of strings
List of variables to summarize. Defaults to None, which results
in all variables summarized.
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
roundto : int
The number of digits to round posterior statistics.
tofile : None or string
File to write results to. If not given, print to stdout.
"""
if varnames is None:
varnames = trace.varnames
stat_summ = _StatSummary(roundto, batches, alpha)
pq_summ = _PosteriorQuantileSummary(roundto, alpha)
if to_file is None:
fh = sys.stdout
else:
fh = open(to_file, mode='w')
for var in varnames:
# Extract sampled values
sample = trace.get_values(var, burn=start, combine=True)
fh.write('\n%s:\n\n' % var)
fh.write(stat_summ.output(sample))
fh.write(pq_summ.output(sample))
if fh is not sys.stdout:
fh.close()
class _Summary(object):
"""Base class for summary output"""
def __init__(self, roundto):
self.roundto = roundto
self.header_lines = None
self.leader = ' '
self.spaces = None
self.width = None
def output(self, sample):
return '\n'.join(list(self._get_lines(sample))) + '\n\n'
def _get_lines(self, sample):
for line in self.header_lines:
yield self.leader + line
summary_lines = self._calculate_values(sample)
for line in self._create_value_output(summary_lines):
yield self.leader + line
def _create_value_output(self, lines):
for values in lines:
try:
self._format_values(values)
yield self.value_line.format(pad=self.spaces, **values).strip()
except AttributeError:
# This is a key for the leading indices, not a normal row.
# `values` will be an empty tuple unless it is 2d or above.
if values:
leading_idxs = [str(v) for v in values]
numpy_idx = '[{}, :]'.format(', '.join(leading_idxs))
yield self._create_idx_row(numpy_idx)
else:
yield ''
def _calculate_values(self, sample):
raise NotImplementedError
def _format_values(self, summary_values):
for key, val in summary_values.items():
summary_values[key] = '{:.{ndec}f}'.format(
float(val), ndec=self.roundto)
def _create_idx_row(self, value):
return '{:.^{}}'.format(value, self.width)
class _StatSummary(_Summary):
def __init__(self, roundto, batches, alpha):
super(_StatSummary, self).__init__(roundto)
spaces = 17
hpd_name = '{0:g}% HPD interval'.format(100 * (1 - alpha))
value_line = '{mean:<{pad}}{sd:<{pad}}{mce:<{pad}}{hpd:<{pad}}'
header = value_line.format(mean='Mean', sd='SD', mce='MC Error',
hpd=hpd_name, pad=spaces).strip()
self.width = len(header)
hline = '-' * self.width
self.header_lines = [header, hline]
self.spaces = spaces
self.value_line = value_line
self.batches = batches
self.alpha = alpha
def _calculate_values(self, sample):
return _calculate_stats(sample, self.batches, self.alpha)
def _format_values(self, summary_values):
roundto = self.roundto
for key, val in summary_values.items():
if key == 'hpd':
summary_values[key] = '[{:.{ndec}f}, {:.{ndec}f}]'.format(
*val, ndec=roundto)
else:
summary_values[key] = '{:.{ndec}f}'.format(
float(val), ndec=roundto)
class _PosteriorQuantileSummary(_Summary):
def __init__(self, roundto, alpha):
super(_PosteriorQuantileSummary, self).__init__(roundto)
spaces = 15
title = 'Posterior quantiles:'
value_line = '{lo:<{pad}}{q25:<{pad}}{q50:<{pad}}{q75:<{pad}}{hi:<{pad}}'
lo, hi = 100 * alpha / 2, 100 * (1. - alpha / 2)
qlist = (lo, 25, 50, 75, hi)
header = value_line.format(lo=lo, q25=25, q50=50, q75=75, hi=hi,
pad=spaces).strip()
self.width = len(header)
hline = '|{thin}|{thick}|{thick}|{thin}|'.format(
thin='-' * (spaces - 1), thick='=' * (spaces - 1))
self.header_lines = [title, header, hline]
self.spaces = spaces
self.lo, self.hi = lo, hi
self.qlist = qlist
self.value_line = value_line
def _calculate_values(self, sample):
return _calculate_posterior_quantiles(sample, self.qlist)
def _calculate_stats(sample, batches, alpha):
means = sample.mean(0)
sds = sample.std(0)
mces = mc_error(sample, batches)
intervals = hpd(sample, alpha)
for key, idxs in _groupby_leading_idxs(sample.shape[1:]):
yield key
for idx in idxs:
mean, sd, mce = [stat[idx] for stat in (means, sds, mces)]
interval = intervals[idx].squeeze().tolist()
yield {'mean': mean, 'sd': sd, 'mce': mce, 'hpd': interval}
def _calculate_posterior_quantiles(sample, qlist):
var_quantiles = quantiles(sample, qlist=qlist)
## Replace ends of qlist with 'lo' and 'hi'
qends = {qlist[0]: 'lo', qlist[-1]: 'hi'}
qkeys = {q: qends[q] if q in qends else 'q{}'.format(q) for q in qlist}
for key, idxs in _groupby_leading_idxs(sample.shape[1:]):
yield key
for idx in idxs:
yield {qkeys[q]: var_quantiles[q][idx] for q in qlist}
def _groupby_leading_idxs(shape):
"""Group the indices for `shape` by the leading indices of `shape`.
All dimensions except for the rightmost dimension are used to create
groups.
A 3d shape will be grouped by the indices for the two leading
dimensions.
>>> for key, idxs in _groupby_leading_idxs((3, 2, 2)):
... print('key: {}'.format(key))
... print(list(idxs))
key: (0, 0)
[(0, 0, 0), (0, 0, 1)]
key: (0, 1)
[(0, 1, 0), (0, 1, 1)]
key: (1, 0)
[(1, 0, 0), (1, 0, 1)]
key: (1, 1)
[(1, 1, 0), (1, 1, 1)]
key: (2, 0)
[(2, 0, 0), (2, 0, 1)]
key: (2, 1)
[(2, 1, 0), (2, 1, 1)]
A 1d shape will only have one group.
>>> for key, idxs in _groupby_leading_idxs((2,)):
... print('key: {}'.format(key))
... print(list(idxs))
key: ()
[(0,), (1,)]
"""
idxs = itertools.product(*[range(s) for s in shape])
return itertools.groupby(idxs, lambda x: x[:-1])
| apache-2.0 |
gregcaporaso/scikit-bio | skbio/stats/gradient.py | 2 | 32198 | r"""
Gradient analyses (:mod:`skbio.stats.gradient`)
===============================================
.. currentmodule:: skbio.stats.gradient
This module provides functionality for performing gradient analyses.
The algorithms included in this module mainly allows performing analysis of
volatility on time series data, but they can be applied to any data that
contains a gradient.
Classes
-------
.. autosummary::
:toctree:
GradientANOVA
AverageGradientANOVA
TrajectoryGradientANOVA
FirstDifferenceGradientANOVA
WindowDifferenceGradientANOVA
GroupResults
CategoryResults
GradientANOVAResults
Examples
--------
Assume we have the following coordinates:
>>> import numpy as np
>>> import pandas as pd
>>> from skbio.stats.gradient import AverageGradientANOVA
>>> coord_data = {'PC.354': np.array([0.2761, -0.0341, 0.0633, 0.1004]),
... 'PC.355': np.array([0.2364, 0.2186, -0.0301, -0.0225]),
... 'PC.356': np.array([0.2208, 0.0874, -0.3519, -0.0031]),
... 'PC.607': np.array([-0.1055, -0.4140, -0.15, -0.116]),
... 'PC.634': np.array([-0.3716, 0.1154, 0.0721, 0.0898])}
>>> coords = pd.DataFrame.from_dict(coord_data, orient='index')
the following metadata map:
>>> metadata_map = {'PC.354': {'Treatment': 'Control', 'Weight': '60'},
... 'PC.355': {'Treatment': 'Control', 'Weight': '55'},
... 'PC.356': {'Treatment': 'Control', 'Weight': '50'},
... 'PC.607': {'Treatment': 'Fast', 'Weight': '65'},
... 'PC.634': {'Treatment': 'Fast', 'Weight': '68'}}
>>> metadata_map = pd.DataFrame.from_dict(metadata_map, orient='index')
and the following array with the proportion explained of each coord:
>>> prop_expl = np.array([25.6216, 15.7715, 14.1215, 11.6913, 9.8304])
Then to compute the average trajectory of this data:
>>> av = AverageGradientANOVA(coords, prop_expl, metadata_map,
... trajectory_categories=['Treatment'],
... sort_category='Weight')
>>> trajectory_results = av.get_trajectories()
Check the algorithm used to compute the trajectory_results:
>>> print(trajectory_results.algorithm)
avg
Check if we weighted the data or not:
>>> print(trajectory_results.weighted)
False
Check the results of one of the categories:
>>> print(trajectory_results.categories[0].category)
Treatment
>>> print(trajectory_results.categories[0].probability)
0.0118478282382
Check the results of one group of one of the categories:
>>> print(trajectory_results.categories[0].groups[0].name)
Control
>>> print(trajectory_results.categories[0].groups[0].trajectory)
[ 3.52199973 2.29597001 3.20309816]
>>> print(trajectory_results.categories[0].groups[0].info)
{'avg': 3.007022633956606}
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from copy import deepcopy
from collections import defaultdict
from numbers import Integral
import numpy as np
from natsort import realsorted
from scipy.stats import f_oneway
from skbio.util._decorator import experimental
def _weight_by_vector(trajectories, w_vector):
r"""weights the values of `trajectories` given a weighting vector
`w_vector`.
Each value in `trajectories` will be weighted by the 'rate of change'
to 'optimal rate of change' ratio. The 'rate of change' of a vector
measures how each point in the vector changes with respect to its
predecessor point. The 'optimal rate of change' is the rate of change
in which each point in the vector performs the same change than its
predecessor, meaning that when calling this function over evenly spaced
`w_vector` values, no change will be reflected on the output.
Parameters
----------
trajectories: pandas.DataFrame
Values to weight
w_vector: pandas.Series
Values used to weight `trajectories`
Returns
-------
pandas.DataFrame
A weighted version of `trajectories`.
Raises
------
ValueError
If `trajectories` and `w_vector` don't have equal lengths
If `w_vector` is not a gradient
TypeError
If `trajectories` and `w_vector` are not iterables
"""
try:
if len(trajectories) != len(w_vector):
raise ValueError("trajectories (%d) & w_vector (%d) must be equal "
"lengths" % (len(trajectories), len(w_vector)))
except TypeError:
raise TypeError("trajectories and w_vector must be iterables")
# check no repeated values are passed in the weighting vector
if len(set(w_vector)) != len(w_vector):
raise ValueError("The weighting vector must be a gradient")
# no need to weight in case of a one element vector
if len(w_vector) == 1:
return trajectories
# Cast to float so divisions have a floating point resolution
total_length = float(max(w_vector) - min(w_vector))
# Reflects the expected gradient between subsequent values in w_vector
# the first value isn't weighted so subtract one from the number of
# elements
optimal_gradient = total_length/(len(w_vector)-1)
# for all elements apply the weighting function
for i, idx in enumerate(trajectories.index):
# Skipping the first element as it doesn't need to be weighted
if i != 0:
trajectories.loc[idx] = (
trajectories.loc[idx] * optimal_gradient /
np.abs((w_vector[i] - w_vector[i-1]))
)
return trajectories
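# Illustrative sketch (not part of the original module): with an evenly spaced
# weighting vector the trajectories come back unchanged, because every gap
# equals the optimal gradient; with uneven spacing, each row after the first
# is rescaled by optimal_gradient / |w_i - w_{i-1}|, shrinking rows that jump
# a large gap and inflating rows separated by a small one.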
def _ANOVA_trajectories(category, res_by_group):
r"""Run ANOVA over `res_by_group`
If ANOVA cannot be run in the current category (because either there is
only one group in category or there is a group with only one member)
the result CategoryResults instance has `probability` and `groups` set
to None and message is set to a string explaining why ANOVA was not run
Returns
-------
CategoryResults
An instance of CategoryResults holding the results of the trajectory
analysis applied on `category`
"""
# If there is only one group under category we cannot run ANOVA
if len(res_by_group) == 1:
return CategoryResults(category, None, None,
'Only one value in the group.')
# Check if groups can be tested using ANOVA. ANOVA testing requires
# all groups to have more than one element.
values = [res.trajectory.astype(float) for res in res_by_group]
if any([len(value) == 1 for value in values]):
return CategoryResults(category, None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
# We are ok to run ANOVA
_, p_val = f_oneway(*values)
return CategoryResults(category, p_val, res_by_group, None)
class GroupResults:
"""Store the trajectory results of a group of a metadata category
Attributes
----------
name : str
The name of the group within the metadata category
trajectory : array like
The result trajectory in an 1-D numpy array
mean : float
The mean of the trajectory
info : dict
Any extra information computed by the trajectory algorithm. Depends on
the algorithm
message : str
A message with information of the execution of the algorithm
"""
@experimental(as_of="0.4.0")
def __init__(self, name, trajectory, mean, info, message):
self.name = name
self.trajectory = trajectory
self.mean = mean
self.info = info
self.message = message
@experimental(as_of="0.4.0")
def to_files(self, out_f, raw_f):
r"""Save the trajectory analysis results for a category group to files
in text format.
Parameters
----------
out_f : file-like object
File-like object to write trajectory analysis data to. Must have a
`write` method. It is the caller's responsibility to close
`out_f` when done (if necessary)
raw_f : file-like object
File-like object to write the raw trajectory values to. Must have
a `write` method. It is the caller's responsibility to close
`raw_f` when done (if necessary)
"""
out_f.write('For group "%s", the group mean is: %f\n'
% (self.name, self.mean))
raw_f.write('For group "%s":\n' % self.name)
if self.message:
out_f.write('%s\n' % self.message)
raw_f.write('%s\n' % self.message)
out_f.write('The info is: %s\n'
% sorted(((k, v) for k, v in self.info.items())))
raw_f.write('The trajectory is:\n[%s]\n'
% ", ".join(map(str, self.trajectory)))
class CategoryResults:
"""Store the trajectory results of a metadata category
Attributes
----------
category : str
The name of the category
probability : float
The ANOVA probability that the category groups are independent
groups : list of GroupResults
The trajectory results for each group in the category
message : str
A message with information of the execution of the algorithm
"""
@experimental(as_of="0.4.0")
def __init__(self, category, probability, groups, message):
self.category = category
self.probability = probability
self.groups = groups
self.message = message
@experimental(as_of="0.4.0")
def to_files(self, out_f, raw_f):
r"""Save the trajectory analysis results for a category to files in
text format.
Parameters
----------
out_f : file-like object
File-like object to write trajectory analysis data to. Must have a
`write` method. It is the caller's responsibility to close `out_f`
when done (if necessary)
raw_f : file-like object
File-like object to write trajectory raw values. Must have a
        `write` method. It is the caller's responsibility to close `raw_f`
when done (if necessary)
"""
if self.probability is None:
out_f.write('Grouped by "%s": %s\n'
% (self.category, self.message))
else:
out_f.write('Grouped by "%s", probability: %f\n'
% (self.category, self.probability))
raw_f.write('Grouped by "%s"\n' % self.category)
for group in self.groups:
group.to_files(out_f, raw_f)
class GradientANOVAResults:
"""Store the trajectory results
Attributes
----------
algorithm : str
The algorithm used to compute trajectories
weighted : bool
If true, a weighting vector was used
categories : list of CategoryResults
The trajectory results for each metadata category
"""
@experimental(as_of="0.4.0")
def __init__(self, algorithm, weighted, categories):
self.algorithm = algorithm
self.weighted = weighted
self.categories = categories
@experimental(as_of="0.4.0")
def to_files(self, out_f, raw_f):
r"""Save the trajectory analysis results to files in text format.
Parameters
----------
out_f : file-like object
File-like object to write trajectories analysis data to. Must have
a `write` method. It is the caller's responsibility to close
`out_f` when done (if necessary)
raw_f : file-like object
File-like object to write trajectories raw values. Must have a
        `write` method. It is the caller's responsibility to close `raw_f`
when done (if necessary)
"""
out_f.write('Trajectory algorithm: %s\n' % self.algorithm)
raw_f.write('Trajectory algorithm: %s\n' % self.algorithm)
if self.weighted:
out_f.write('** This output is weighted **\n')
raw_f.write('** This output is weighted **\n')
out_f.write('\n')
raw_f.write('\n')
for cat_results in self.categories:
cat_results.to_files(out_f, raw_f)
out_f.write('\n')
raw_f.write('\n')
class GradientANOVA:
r"""Base class for the Trajectory algorithms
Parameters
----------
coords : pandas.DataFrame
The coordinates for each sample id
prop_expl : array like
The numpy 1-D array with the proportion explained by each axis in
coords
metadata_map : pandas.DataFrame
The metadata map, indexed by sample ids and columns are metadata
categories
trajectory_categories : list of str, optional
A list of metadata categories to use to create the trajectories. If
None is passed, the trajectories for all metadata categories are
computed. Default: None, compute all of them
sort_category : str, optional
The metadata category to use to sort the trajectories. Default: None
axes : int, optional
The number of axes to account while doing the trajectory specific
calculations. Pass 0 to compute all of them. Default: 3
weighted : bool, optional
If true, the output is weighted by the space between samples in the
`sort_category` column
Raises
------
ValueError
If any category of `trajectory_categories` is not present in
`metadata_map`
If `sort_category` is not present in `metadata_map`
If `axes` is not between 0 and the maximum number of axes available
If `weighted` is True and no `sort_category` is provided
If `weighted` is True and the values under `sort_category` are not
numerical
        If `coords` and `metadata_map` do not have samples in common
"""
# Should be defined by the derived classes
_alg_name = None
@experimental(as_of="0.4.0")
def __init__(self, coords, prop_expl, metadata_map,
trajectory_categories=None, sort_category=None, axes=3,
weighted=False):
if not trajectory_categories:
# If trajectory_categories is not provided, use all the categories
# present in the metadata map
trajectory_categories = metadata_map.keys()
else:
# Check that trajectory_categories are in metadata_map
for category in trajectory_categories:
if category not in metadata_map:
raise ValueError("Category %s not present in metadata."
% category)
            # Check that sort_category is in metadata_map
if sort_category and sort_category not in metadata_map:
raise ValueError("Sort category %s not present in metadata."
% sort_category)
if axes == 0:
# If axes == 0, we should compute the trajectories for all axes
axes = len(prop_expl)
elif axes > len(prop_expl) or axes < 0:
# Axes should be 0 <= axes <= len(prop_expl)
raise ValueError("axes should be between 0 and the max number of "
"axes available (%d), found: %d "
% (len(prop_expl), axes))
# Restrict coordinates to those axes that we actually need to compute
self._coords = coords.loc[:, :axes-1]
self._prop_expl = prop_expl[:axes]
self._metadata_map = metadata_map
self._weighted = weighted
# Remove any samples from coords not present in mapping file
# and remove any samples from metadata_map not present in coords
self._normalize_samples()
# Create groups
self._make_groups(trajectory_categories, sort_category)
# Compute the weighting_vector
self._weighting_vector = None
if weighted:
if not sort_category:
raise ValueError("You should provide a sort category if you "
"want to weight the trajectories")
try:
self._weighting_vector = \
self._metadata_map[sort_category].astype(np.float64)
except ValueError:
raise ValueError("The sorting category must be numeric")
# Initialize the message buffer
self._message_buffer = []
@experimental(as_of="0.4.0")
def get_trajectories(self):
r"""Compute the trajectories for each group in each category and run
ANOVA over the results to test group independence.
Returns
-------
GradientANOVAResults
An instance of GradientANOVAResults holding the results.
"""
result = GradientANOVAResults(self._alg_name, self._weighted, [])
# Loop through all the categories that we should compute
# the trajectories
for cat, cat_groups in self._groups.items():
# Loop through all the category values present in the current
# category and compute the trajectory for each of them
res_by_group = []
for group in sorted(cat_groups, key=lambda k: str(k)):
res_by_group.append(
self._get_group_trajectories(group, cat_groups[group]))
result.categories.append(_ANOVA_trajectories(cat, res_by_group))
return result
def _normalize_samples(self):
r"""Ensures that `self._coords` and `self._metadata_map` have the same
sample ids
Raises
------
ValueError
            If `coords` and `metadata_map` do not have samples in common
"""
# Figure out the sample ids in common
coords_sample_ids = set(self._coords.index)
mm_sample_ids = set(self._metadata_map.index)
sample_ids = coords_sample_ids.intersection(mm_sample_ids)
# Check if they actually have sample ids in common
if not sample_ids:
raise ValueError("Coordinates and metadata map had no samples "
"in common")
# Need to take a subset of coords
if coords_sample_ids != sample_ids:
self._coords = self._coords.loc[sample_ids]
# Need to take a subset of metadata_map
if mm_sample_ids != sample_ids:
self._metadata_map = self._metadata_map.loc[sample_ids]
def _make_groups(self, trajectory_categories, sort_category):
r"""Groups the sample ids in `self._metadata_map` by the values in
`trajectory_categories`
Creates `self._groups`, a dictionary keyed by category and values are
dictionaries in which the keys represent the group name within the
category and values are ordered lists of sample ids
If `sort_category` is not None, the sample ids are sorted based on the
values under this category in the metadata map. Otherwise, they are
sorted using the sample id.
Parameters
----------
trajectory_categories : list of str
A list of metadata categories to use to create the groups.
Default: None, compute all of them
sort_category : str or None
The category from self._metadata_map to use to sort groups
"""
        # If sort_category is provided, we use the value of that category to
# sort. Otherwise, we use the sample id.
if sort_category:
def sort_val(sid):
return self._metadata_map[sort_category][sid]
else:
def sort_val(sid):
return sid
self._groups = defaultdict(dict)
for cat in trajectory_categories:
# Group samples by category
gb = self._metadata_map.groupby(cat)
for g, df in gb:
self._groups[cat][g] = realsorted(df.index, key=sort_val)
def _get_group_trajectories(self, group_name, sids):
r"""Compute the trajectory results for `group_name` containing the
samples `sids`.
Weights the data if `self._weighted` is True and ``len(sids) > 1``
Parameters
----------
group_name : str
The name of the group
sids : list of str
The sample ids in the group
Returns
-------
GroupResults
The trajectory results for the given group
Raises
------
RuntimeError
If sids is an empty list
"""
# We multiply the coord values with the prop_expl
trajectories = self._coords.loc[sids] * self._prop_expl
if trajectories.empty:
# Raising a RuntimeError since in a usual execution this should
# never happen. The only way this can happen is if the user
# directly calls this method, which shouldn't be done
# (that's why the method is private)
raise RuntimeError("No samples to process, an empty list cannot "
"be processed")
# The weighting can only be done over trajectories with a length
# greater than 1
if self._weighted and len(sids) > 1:
trajectories_copy = deepcopy(trajectories)
try:
trajectories = _weight_by_vector(trajectories_copy,
self._weighting_vector[sids])
except (FloatingPointError, ValueError):
self._message_buffer.append("Could not weight group, no "
"gradient in the the "
"weighting vector.\n")
trajectories = trajectories_copy
return self._compute_trajectories_results(group_name,
trajectories.loc[sids])
def _compute_trajectories_results(self, group_name, trajectories):
r"""Do the actual trajectories computation over trajectories
Parameters
----------
group_name : str
The name of the group
trajectories : pandas.DataFrame
The sorted trajectories for each sample in the group
Raises
------
NotImplementedError
This is the base class
"""
raise NotImplementedError("No algorithm is implemented on the base "
"class.")
class AverageGradientANOVA(GradientANOVA):
r"""Perform trajectory analysis using the RMS average algorithm
For each group in a category, it computes the average point among the
    samples in that group and then computes the norm of each sample's
    difference from that average point.
See Also
--------
GradientANOVA
"""
_alg_name = 'avg'
def _compute_trajectories_results(self, group_name, trajectories):
r"""Do the actual trajectory computation over trajectories
Parameters
----------
group_name : str
The name of the group
trajectories : pandas.DataFrame
The sorted trajectories for each sample in the group
Returns
-------
GroupResults
The trajectory results for `group_name` using the average
trajectories method
"""
center = np.average(trajectories, axis=0)
if len(trajectories) == 1:
trajectory = np.array([np.linalg.norm(center)])
calc = {'avg': trajectory[0]}
else:
trajectory = np.array([np.linalg.norm(row[1].to_numpy() - center)
for row in trajectories.iterrows()])
calc = {'avg': np.average(trajectory)}
msg = ''.join(self._message_buffer) if self._message_buffer else None
# Reset the message buffer
self._message_buffer = []
return GroupResults(group_name, trajectory, np.mean(trajectory),
calc, msg)
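# Sketch of the computation above with toy numbers (assumption: plain numpy
# outside the class machinery; the helper name is hypothetical).
def _avg_trajectory_sketch():  # pragma: no cover - sketch only
    import numpy as np
    pts = np.array([[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]])
    center = np.average(pts, axis=0)                   # centroid of the group
    trajectory = np.linalg.norm(pts - center, axis=1)  # distance of each sample
    return trajectory, np.average(trajectory)          # per-sample norms, 'avg'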
class TrajectoryGradientANOVA(GradientANOVA):
r"""Perform trajectory analysis using the RMS trajectory algorithm
    For each group in a category, each component of the result trajectory is
    computed by taking the sorted list of samples in the group and taking the
    norm of the coordinates of the 2nd sample minus the 1st sample, the 3rd
    sample minus the 2nd sample, and so on.
See Also
--------
GradientANOVA
"""
_alg_name = 'trajectory'
def _compute_trajectories_results(self, group_name, trajectories):
r"""Do the actual trajectory computation over trajectories
Parameters
----------
group_name : str
The name of the group
trajectories : pandas.DataFrame
The sorted trajectories for each sample in the group
Returns
-------
GroupResults
The trajectory results for `group_name` using the trajectory
method
"""
if len(trajectories) == 1:
trajectory = np.array([np.linalg.norm(trajectories)])
calc = {'2-norm': trajectory[0]}
else:
# Loop through all the rows in trajectories and create '2-norm'
# by taking the norm of the 2nd row - 1st row, 3rd row - 2nd row...
trajectory = \
np.array([np.linalg.norm(trajectories.iloc[i+1].to_numpy() -
trajectories.iloc[i].to_numpy())
for i in range(len(trajectories) - 1)])
calc = {'2-norm': np.linalg.norm(trajectory)}
msg = ''.join(self._message_buffer) if self._message_buffer else None
# Reset the message buffer
self._message_buffer = []
return GroupResults(group_name, trajectory, np.mean(trajectory),
calc, msg)
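# Sketch of the computation above with toy numbers (assumption: plain numpy
# outside the class machinery; the helper name is hypothetical).
def _trajectory_norms_sketch():  # pragma: no cover - sketch only
    import numpy as np
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 2.0]])
    steps = np.diff(pts, axis=0)                   # 2nd-1st, 3rd-2nd, ...
    trajectory = np.linalg.norm(steps, axis=1)     # [1.0, 2.0]
    return trajectory, np.linalg.norm(trajectory)  # per-step norms, '2-norm'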
class FirstDifferenceGradientANOVA(GradientANOVA):
r"""Perform trajectory analysis using the first difference algorithm
It calculates the norm for all the time-points and then calculates the
first difference for each resulting point
See Also
--------
GradientANOVA
"""
_alg_name = 'diff'
def _compute_trajectories_results(self, group_name, trajectories):
r"""Do the actual trajectory computation over trajectories
Parameters
----------
group_name : str
The name of the group
trajectories : pandas.DataFrame
The sorted trajectories for each sample in the group
Returns
-------
GroupResults
The trajectory results for `group_name` using the first difference
method
"""
if len(trajectories) == 1:
trajectory = np.array([np.linalg.norm(trajectories)])
calc = {'mean': trajectory[0], 'std': 0}
elif len(trajectories) == 2:
trajectory = np.array([np.linalg.norm(trajectories[1] -
trajectories[0])])
calc = {'mean': trajectory[0], 'std': 0}
else:
vec_norm = \
np.array([np.linalg.norm(trajectories.iloc[i+1].to_numpy() -
trajectories.iloc[i].to_numpy())
for i in range(len(trajectories) - 1)])
trajectory = np.diff(vec_norm)
calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}
msg = ''.join(self._message_buffer) if self._message_buffer else None
# Reset the message buffer
self._message_buffer = []
return GroupResults(group_name, trajectory, np.mean(trajectory),
calc, msg)
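# Sketch of the computation above with toy numbers (assumption: plain numpy
# outside the class machinery; the helper name is hypothetical).
def _first_difference_sketch():  # pragma: no cover - sketch only
    import numpy as np
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 2.0], [4.0, 2.0]])
    vec_norm = np.linalg.norm(np.diff(pts, axis=0), axis=1)  # [1.0, 2.0, 3.0]
    trajectory = np.diff(vec_norm)                           # [1.0, 1.0]
    return np.mean(trajectory), np.std(trajectory)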
class WindowDifferenceGradientANOVA(GradientANOVA):
r"""Perform trajectory analysis using the modified first difference
algorithm
    It calculates the norm for all the time-points and, for each point,
    subtracts the current element from the mean of the next `window_size`
    elements.
Parameters
----------
coords : pandas.DataFrame
The coordinates for each sample id
prop_expl : array like
The numpy 1-D array with the proportion explained by each axis in
coords
metadata_map : pandas.DataFrame
The metadata map, indexed by sample ids and columns are metadata
categories
window_size : int or long
The window size to use while computing the differences
Raises
------
ValueError
If the window_size is not a positive integer
See Also
--------
GradientANOVA
"""
_alg_name = 'wdiff'
@experimental(as_of="0.4.0")
def __init__(self, coords, prop_expl, metadata_map, window_size, **kwargs):
super(WindowDifferenceGradientANOVA, self).__init__(coords, prop_expl,
metadata_map,
**kwargs)
if not isinstance(window_size, Integral) or window_size < 1:
raise ValueError("The window_size must be a positive integer")
self._window_size = window_size
def _compute_trajectories_results(self, group_name, trajectories):
r"""Do the actual trajectory computation over trajectories
        If the first difference cannot be calculated with the provided window
size, no difference is applied and a message is added to the results.
Parameters
----------
group_name : str
The name of the group
trajectories : pandas.DataFrame
The sorted trajectories for each sample in the group
Returns
-------
GroupResults
The trajectory results for `group_name` using the windowed
difference method
"""
if len(trajectories) == 1:
trajectory = np.array([np.linalg.norm(trajectories)])
calc = {'mean': trajectory, 'std': 0}
elif len(trajectories) == 2:
trajectory = np.array([np.linalg.norm(trajectories[1] -
trajectories[0])])
calc = {'mean': trajectory, 'std': 0}
else:
vec_norm = \
np.array([np.linalg.norm(trajectories.iloc[i+1].to_numpy() -
trajectories.iloc[i].to_numpy())
for i in range(len(trajectories) - 1)])
            # windowed first differences can't be computed for every group,
            # especially given the variation in size that a trajectory tends
# to have
if len(vec_norm) <= self._window_size:
trajectory = vec_norm
self._message_buffer.append("Cannot calculate the first "
"difference with a window of size "
"(%d)." % self._window_size)
else:
# Replicate the last element as many times as required
for idx in range(0, self._window_size):
vec_norm = np.append(vec_norm, vec_norm[-1:], axis=0)
trajectory = []
for idx in range(0, len(vec_norm) - self._window_size):
                    # Mean has to be over axis 0 so it handles arrays of arrays
element = np.mean(vec_norm[(idx + 1):
(idx + 1 + self._window_size)],
axis=0)
trajectory.append(element - vec_norm[idx])
trajectory = np.array(trajectory)
calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}
msg = ''.join(self._message_buffer) if self._message_buffer else None
# Reset the message buffer
self._message_buffer = []
return GroupResults(group_name, trajectory, np.mean(trajectory),
calc, msg)
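# Sketch of the windowed difference above with toy numbers (assumption: plain
# numpy outside the class machinery, window size of 2; the helper name is
# hypothetical).
def _window_difference_sketch():  # pragma: no cover - sketch only
    import numpy as np
    vec_norm = np.array([1.0, 2.0, 3.0, 5.0])
    window = 2
    padded = np.append(vec_norm, [vec_norm[-1]] * window)  # replicate last value
    trajectory = np.array([np.mean(padded[i + 1:i + 1 + window]) - padded[i]
                           for i in range(len(padded) - window)])
    return trajectory  # [1.5, 2.0, 2.0, 0.0]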
| bsd-3-clause |
geopandas/geopandas | geopandas/sindex.py | 1 | 21624 | import warnings
from shapely.geometry.base import BaseGeometry
import pandas as pd
import numpy as np
from . import _compat as compat
from ._decorator import doc
def _get_sindex_class():
"""Dynamically chooses a spatial indexing backend.
Required to comply with _compat.USE_PYGEOS.
The selection order goes PyGEOS > RTree > Error.
"""
if compat.USE_PYGEOS:
return PyGEOSSTRTreeIndex
if compat.HAS_RTREE:
return RTreeIndex
raise ImportError(
"Spatial indexes require either `rtree` or `pygeos`. "
"See installation instructions at https://geopandas.org/install.html"
)
class BaseSpatialIndex:
@property
def valid_query_predicates(self):
"""Returns valid predicates for this spatial index.
Returns
-------
set
Set of valid predicates for this spatial index.
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries([Point(0, 0), Point(1, 1)])
>>> s.sindex.valid_query_predicates # doctest: +SKIP
{'contains', 'crosses', 'intersects', 'within', 'touches', \
'overlaps', None, 'covers', 'contains_properly'}
"""
raise NotImplementedError
def query(self, geometry, predicate=None, sort=False):
"""Return the index of all geometries in the tree with extents that
intersect the envelope of the input geometry.
When using the ``rtree`` package, this is not a vectorized function.
If speed is important, please use PyGEOS.
Parameters
----------
geometry : shapely geometry
A single shapely geometry to query against the spatial index.
predicate : {None, 'intersects', 'within', 'contains', \
'overlaps', 'crosses', 'touches'}, optional
If predicate is provided, the input geometry is
tested using the predicate function against each item
in the tree whose extent intersects the envelope of the
input geometry: predicate(input_geometry, tree_geometry).
If possible, prepared geometries are used to help
speed up the predicate operation.
sort : bool, default False
If True, the results will be sorted in ascending order.
If False, results are often sorted but there is no guarantee.
Returns
-------
matches : ndarray of shape (n_results, )
Integer indices for matching geometries from the spatial index.
Examples
--------
>>> from shapely.geometry import Point, box
>>> s = geopandas.GeoSeries(geopandas.points_from_xy(range(10), range(10)))
>>> s
0 POINT (0.00000 0.00000)
1 POINT (1.00000 1.00000)
2 POINT (2.00000 2.00000)
3 POINT (3.00000 3.00000)
4 POINT (4.00000 4.00000)
5 POINT (5.00000 5.00000)
6 POINT (6.00000 6.00000)
7 POINT (7.00000 7.00000)
8 POINT (8.00000 8.00000)
9 POINT (9.00000 9.00000)
dtype: geometry
>>> s.sindex.query(box(1, 1, 3, 3))
array([1, 2, 3])
>>> s.sindex.query(box(1, 1, 3, 3), predicate="contains")
array([2])
"""
raise NotImplementedError
def query_bulk(self, geometry, predicate=None, sort=False):
"""
Returns all combinations of each input geometry and geometries in
the tree where the envelope of each input geometry intersects with
the envelope of a tree geometry.
In the context of a spatial join, input geometries are the “left”
geometries that determine the order of the results, and tree geometries
are “right” geometries that are joined against the left geometries.
This effectively performs an inner join, where only those combinations
of geometries that can be joined based on envelope overlap or optional
predicate are returned.
When using the ``rtree`` package, this is not a vectorized function
and may be slow. If speed is important, please use PyGEOS.
Parameters
----------
geometry : {GeoSeries, GeometryArray, numpy.array of PyGEOS geometries}
Accepts GeoPandas geometry iterables (GeoSeries, GeometryArray)
or a numpy array of PyGEOS geometries.
predicate : {None, 'intersects', 'within', 'contains', 'overlaps', \
'crosses', 'touches'}, optional
If predicate is provided, the input geometries are tested using
the predicate function against each item in the tree whose extent
            intersects the envelope of each input geometry:
predicate(input_geometry, tree_geometry). If possible, prepared
geometries are used to help speed up the predicate operation.
sort : bool, default False
If True, results sorted lexicographically using
geometry's indexes as the primary key and the sindex's indexes as the
secondary key. If False, no additional sorting is applied.
Returns
-------
ndarray with shape (2, n)
The first subarray contains input geometry integer indexes.
The second subarray contains tree geometry integer indexes.
Examples
--------
>>> from shapely.geometry import Point, box
>>> s = geopandas.GeoSeries(geopandas.points_from_xy(range(10), range(10)))
>>> s
0 POINT (0.00000 0.00000)
1 POINT (1.00000 1.00000)
2 POINT (2.00000 2.00000)
3 POINT (3.00000 3.00000)
4 POINT (4.00000 4.00000)
5 POINT (5.00000 5.00000)
6 POINT (6.00000 6.00000)
7 POINT (7.00000 7.00000)
8 POINT (8.00000 8.00000)
9 POINT (9.00000 9.00000)
dtype: geometry
>>> s2 = geopandas.GeoSeries([box(2, 2, 4, 4), box(5, 5, 6, 6)])
>>> s2
0 POLYGON ((4.00000 2.00000, 4.00000 4.00000, 2....
1 POLYGON ((6.00000 5.00000, 6.00000 6.00000, 5....
dtype: geometry
>>> s.sindex.query_bulk(s2)
array([[0, 0, 0, 1, 1],
[2, 3, 4, 5, 6]])
>>> s.sindex.query_bulk(s2, predicate="contains")
array([[0],
[3]])
"""
raise NotImplementedError
def intersection(self, coordinates):
"""Compatibility wrapper for rtree.index.Index.intersection,
        use ``query`` instead.
Parameters
----------
coordinates : sequence or array
Sequence of the form (min_x, min_y, max_x, max_y)
to query a rectangle or (x, y) to query a point.
Examples
--------
>>> from shapely.geometry import Point, box
>>> s = geopandas.GeoSeries(geopandas.points_from_xy(range(10), range(10)))
>>> s
0 POINT (0.00000 0.00000)
1 POINT (1.00000 1.00000)
2 POINT (2.00000 2.00000)
3 POINT (3.00000 3.00000)
4 POINT (4.00000 4.00000)
5 POINT (5.00000 5.00000)
6 POINT (6.00000 6.00000)
7 POINT (7.00000 7.00000)
8 POINT (8.00000 8.00000)
9 POINT (9.00000 9.00000)
dtype: geometry
>>> s.sindex.intersection(box(1, 1, 3, 3).bounds)
array([1, 2, 3])
Alternatively, you can use ``query``:
>>> s.sindex.query(box(1, 1, 3, 3))
array([1, 2, 3])
"""
raise NotImplementedError
@property
def size(self):
"""Size of the spatial index
Number of leaves (input geometries) in the index.
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(geopandas.points_from_xy(range(10), range(10)))
>>> s
0 POINT (0.00000 0.00000)
1 POINT (1.00000 1.00000)
2 POINT (2.00000 2.00000)
3 POINT (3.00000 3.00000)
4 POINT (4.00000 4.00000)
5 POINT (5.00000 5.00000)
6 POINT (6.00000 6.00000)
7 POINT (7.00000 7.00000)
8 POINT (8.00000 8.00000)
9 POINT (9.00000 9.00000)
dtype: geometry
>>> s.sindex.size
10
"""
raise NotImplementedError
@property
def is_empty(self):
"""Check if the spatial index is empty
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(geopandas.points_from_xy(range(10), range(10)))
>>> s
0 POINT (0.00000 0.00000)
1 POINT (1.00000 1.00000)
2 POINT (2.00000 2.00000)
3 POINT (3.00000 3.00000)
4 POINT (4.00000 4.00000)
5 POINT (5.00000 5.00000)
6 POINT (6.00000 6.00000)
7 POINT (7.00000 7.00000)
8 POINT (8.00000 8.00000)
9 POINT (9.00000 9.00000)
dtype: geometry
>>> s.sindex.is_empty
False
>>> s2 = geopandas.GeoSeries()
>>> s2.sindex.is_empty
True
"""
raise NotImplementedError
if compat.HAS_RTREE:
import rtree.index # noqa
from rtree.core import RTreeError # noqa
from shapely.prepared import prep # noqa
class SpatialIndex(rtree.index.Index, BaseSpatialIndex):
"""Original rtree wrapper, kept for backwards compatibility."""
def __init__(self, *args):
warnings.warn(
"Directly using SpatialIndex is deprecated, and the class will be "
"removed in a future version. Access the spatial index through the "
"`GeoSeries.sindex` attribute, or use `rtree.index.Index` directly.",
FutureWarning,
stacklevel=2,
)
super().__init__(*args)
@doc(BaseSpatialIndex.intersection)
def intersection(self, coordinates, *args, **kwargs):
return super().intersection(coordinates, *args, **kwargs)
@property
@doc(BaseSpatialIndex.size)
def size(self):
return len(self.leaves()[0][1])
@property
@doc(BaseSpatialIndex.is_empty)
def is_empty(self):
if len(self.leaves()) > 1:
return False
return self.size < 1
class RTreeIndex(rtree.index.Index):
"""A simple wrapper around rtree's RTree Index
Parameters
----------
geometry : np.array of Shapely geometries
Geometries from which to build the spatial index.
"""
def __init__(self, geometry):
stream = (
(i, item.bounds, None)
for i, item in enumerate(geometry)
if pd.notnull(item) and not item.is_empty
)
try:
super().__init__(stream)
except RTreeError:
# What we really want here is an empty generator error, or
# for the bulk loader to log that the generator was empty
# and move on.
# See https://github.com/Toblerity/rtree/issues/20.
super().__init__()
# store reference to geometries for predicate queries
self.geometries = geometry
# create a prepared geometry cache
self._prepared_geometries = np.array(
[None] * self.geometries.size, dtype=object
)
@property
@doc(BaseSpatialIndex.valid_query_predicates)
def valid_query_predicates(self):
return {
None,
"intersects",
"within",
"contains",
"overlaps",
"crosses",
"touches",
"covers",
"contains_properly",
}
@doc(BaseSpatialIndex.query)
def query(self, geometry, predicate=None, sort=False):
# handle invalid predicates
if predicate not in self.valid_query_predicates:
raise ValueError(
"Got `predicate` = `{}`, `predicate` must be one of {}".format(
predicate, self.valid_query_predicates
)
)
# handle empty / invalid geometries
if geometry is None:
                # return an empty integer array, similar to pygeos.STRtree.query.
return np.array([], dtype=np.intp)
if not isinstance(geometry, BaseGeometry):
raise TypeError(
"Got `geometry` of type `{}`, `geometry` must be ".format(
type(geometry)
)
+ "a shapely geometry."
)
if geometry.is_empty:
return np.array([], dtype=np.intp)
# query tree
bounds = geometry.bounds # rtree operates on bounds
tree_idx = list(self.intersection(bounds))
if not tree_idx:
return np.array([], dtype=np.intp)
# Check predicate
# This is checked as input_geometry.predicate(tree_geometry)
# When possible, we use prepared geometries.
# Prepared geometries only support "intersects" and "contains"
# For the special case of "within", we are able to flip the
# comparison and check if tree_geometry.contains(input_geometry)
# to still take advantage of prepared geometries.
if predicate == "within":
# To use prepared geometries for within,
# we compare tree_geom.contains(input_geom)
# Since we are preparing the tree geometries,
# we cache them for multiple comparisons.
res = []
for index_in_tree in tree_idx:
if self._prepared_geometries[index_in_tree] is None:
# if not already prepared, prepare and cache
self._prepared_geometries[index_in_tree] = prep(
self.geometries[index_in_tree]
)
if self._prepared_geometries[index_in_tree].contains(geometry):
res.append(index_in_tree)
tree_idx = res
elif predicate is not None:
# For the remaining predicates,
# we compare input_geom.predicate(tree_geom)
if predicate in (
"contains",
"intersects",
"covers",
"contains_properly",
):
# prepare this input geometry
geometry = prep(geometry)
tree_idx = [
index_in_tree
for index_in_tree in tree_idx
if getattr(geometry, predicate)(self.geometries[index_in_tree])
]
# sort if requested
if sort:
# sorted
return np.sort(np.array(tree_idx, dtype=np.intp))
# unsorted
return np.array(tree_idx, dtype=np.intp)
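        # A short standalone sketch of the prepared-geometry speedup used
        # above (assumption: toy shapely geometries, illustrative only):
        #
        #   from shapely.geometry import Point, box
        #   from shapely.prepared import prep
        #   prepared = prep(box(0, 0, 10, 10))
        #   prepared.contains(Point(5, 5))   # True; repeated tests are cheap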
@doc(BaseSpatialIndex.query_bulk)
def query_bulk(self, geometry, predicate=None, sort=False):
            # Iterate over the input geometries, applying `query` to each one.
tree_index = []
input_geometry_index = []
for i, geo in enumerate(geometry):
res = self.query(geo, predicate=predicate, sort=sort)
tree_index.extend(res)
input_geometry_index.extend([i] * len(res))
return np.vstack([input_geometry_index, tree_index])
@doc(BaseSpatialIndex.intersection)
def intersection(self, coordinates):
return super().intersection(coordinates, objects=False)
@property
@doc(BaseSpatialIndex.size)
def size(self):
if hasattr(self, "_size"):
size = self._size
else:
# self.leaves are lists of tuples of (int, lists...)
# index [0][1] always has an element, even for empty sindex
# for an empty index, it will be an empty list
size = len(self.leaves()[0][1])
self._size = size
return size
@property
@doc(BaseSpatialIndex.is_empty)
def is_empty(self):
return self.geometries.size == 0 or self.size == 0
def __len__(self):
return self.size
if compat.HAS_PYGEOS:
from . import geoseries # noqa
from . import array # noqa
import pygeos # noqa
class PyGEOSSTRTreeIndex(pygeos.STRtree):
"""A simple wrapper around pygeos's STRTree.
Parameters
----------
geometry : np.array of PyGEOS geometries
Geometries from which to build the spatial index.
"""
def __init__(self, geometry):
# set empty geometries to None to avoid segfault on GEOS <= 3.6
# see:
# https://github.com/pygeos/pygeos/issues/146
# https://github.com/pygeos/pygeos/issues/147
non_empty = geometry.copy()
non_empty[pygeos.is_empty(non_empty)] = None
            # set empty geometries to None to maintain indexing
super().__init__(non_empty)
# store geometries, including empty geometries for user access
self.geometries = geometry.copy()
@property
def valid_query_predicates(self):
"""Returns valid predicates for the used spatial index.
Returns
-------
set
Set of valid predicates for this spatial index.
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries([Point(0, 0), Point(1, 1)])
>>> s.sindex.valid_query_predicates # doctest: +SKIP
{'contains', 'crosses', 'covered_by', None, 'intersects', 'within', \
'touches', 'overlaps', 'contains_properly', 'covers'}
"""
return {p.name for p in pygeos.strtree.BinaryPredicate} | set([None])
@doc(BaseSpatialIndex.query)
def query(self, geometry, predicate=None, sort=False):
if predicate not in self.valid_query_predicates:
raise ValueError(
"Got `predicate` = `{}`; ".format(predicate)
+ "`predicate` must be one of {}".format(
self.valid_query_predicates
)
)
if isinstance(geometry, BaseGeometry):
geometry = array._shapely_to_geom(geometry)
matches = super().query(geometry=geometry, predicate=predicate)
if sort:
return np.sort(matches)
return matches
@doc(BaseSpatialIndex.query_bulk)
def query_bulk(self, geometry, predicate=None, sort=False):
if predicate not in self.valid_query_predicates:
raise ValueError(
"Got `predicate` = `{}`, `predicate` must be one of {}".format(
predicate, self.valid_query_predicates
)
)
if isinstance(geometry, geoseries.GeoSeries):
geometry = geometry.values.data
elif isinstance(geometry, array.GeometryArray):
geometry = geometry.data
elif not isinstance(geometry, np.ndarray):
geometry = np.asarray(geometry)
res = super().query_bulk(geometry, predicate)
if sort:
# sort by first array (geometry) and then second (tree)
geo_res, tree_res = res
indexing = np.lexsort((tree_res, geo_res))
return np.vstack((geo_res[indexing], tree_res[indexing]))
return res
@doc(BaseSpatialIndex.intersection)
def intersection(self, coordinates):
# convert bounds to geometry
# the old API uses tuples of bound, but pygeos uses geometries
try:
iter(coordinates)
except TypeError:
# likely not an iterable
# this is a check that rtree does, we mimic it
# to ensure a useful failure message
raise TypeError(
"Invalid coordinates, must be iterable in format "
"(minx, miny, maxx, maxy) (for bounds) or (x, y) (for points). "
"Got `coordinates` = {}.".format(coordinates)
)
# need to convert tuple of bounds to a geometry object
if len(coordinates) == 4:
indexes = super().query(pygeos.box(*coordinates))
elif len(coordinates) == 2:
indexes = super().query(pygeos.points(*coordinates))
else:
raise TypeError(
"Invalid coordinates, must be iterable in format "
"(minx, miny, maxx, maxy) (for bounds) or (x, y) (for points). "
"Got `coordinates` = {}.".format(coordinates)
)
return indexes
@property
@doc(BaseSpatialIndex.size)
def size(self):
return len(self)
@property
@doc(BaseSpatialIndex.is_empty)
def is_empty(self):
return len(self) == 0
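        # A standalone sketch of the underlying pygeos STRtree query
        # (assumption: toy points, illustrative only, not part of this module):
        #
        #   import pygeos
        #   tree = pygeos.STRtree(pygeos.points([0, 1, 2], [0, 1, 2]))
        #   tree.query(pygeos.box(0.5, 0.5, 1.5, 1.5))  # -> array([1])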
| bsd-3-clause |
abimannans/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
    >>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
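# A small sketch of the inverse-CDF trick used above for weighted sampling
# with replacement (assumption: standalone numpy code, toy probabilities;
# the helper name is hypothetical).
def _weighted_sampling_sketch():  # pragma: no cover - sketch only
    import numpy as np
    rng = np.random.RandomState(0)
    p = np.array([0.1, 0.0, 0.3, 0.6])
    cdf = p.cumsum()
    cdf /= cdf[-1]
    uniform_samples = rng.random_sample(5)
    return cdf.searchsorted(uniform_samples, side='right')  # indices drawn ~ p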
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
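# A tiny sketch of how (data, indices, indptr) arrays become a CSC matrix
# (assumption: hand-written toy arrays, standalone scipy code; the helper
# name is hypothetical).
def _csc_from_arrays_sketch():  # pragma: no cover - sketch only
    import numpy as np
    import scipy.sparse as sp
    data = np.array([2, 1, 3])
    indices = np.array([0, 2, 1])  # row index of each stored value
    indptr = np.array([0, 2, 3])   # column j owns data[indptr[j]:indptr[j+1]]
    m = sp.csc_matrix((data, indices, indptr), shape=(3, 2))
    return m.toarray()             # [[2, 0], [0, 3], [1, 0]]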
| bsd-3-clause |
mizzao/ggplot | ggplot/themes/theme_gray.py | 12 | 4162 | from .theme import theme
import matplotlib as mpl
class theme_gray(theme):
"""
Standard theme for ggplot. Gray background w/ white gridlines.
    Copied from the ggplot2 codebase:
https://github.com/hadley/ggplot2/blob/master/R/theme-defaults.r
"""
def __init__(self):
super(theme_gray, self).__init__(complete=True)
self._rcParams["timezone"] = "UTC"
self._rcParams["lines.linewidth"] = "1.0"
self._rcParams["lines.antialiased"] = "True"
self._rcParams["patch.linewidth"] = "0.5"
self._rcParams["patch.facecolor"] = "348ABD"
self._rcParams["patch.edgecolor"] = "#E5E5E5"
self._rcParams["patch.antialiased"] = "True"
self._rcParams["font.family"] = "sans-serif"
self._rcParams["font.size"] = "12.0"
self._rcParams["font.serif"] = ["Times", "Palatino",
"New Century Schoolbook",
"Bookman", "Computer Modern Roman",
"Times New Roman"]
self._rcParams["font.sans-serif"] = ["Helvetica", "Avant Garde",
"Computer Modern Sans serif",
"Arial"]
self._rcParams["axes.facecolor"] = "#E5E5E5"
self._rcParams["axes.edgecolor"] = "bcbcbc"
self._rcParams["axes.linewidth"] = "1"
self._rcParams["axes.grid"] = "True"
self._rcParams["axes.titlesize"] = "x-large"
self._rcParams["axes.labelsize"] = "large"
self._rcParams["axes.labelcolor"] = "black"
self._rcParams["axes.axisbelow"] = "True"
self._rcParams["axes.color_cycle"] = ["#333333", "348ABD", "7A68A6",
"A60628",
"467821", "CF4457", "188487",
"E24A33"]
self._rcParams["grid.color"] = "white"
self._rcParams["grid.linewidth"] = "1.4"
self._rcParams["grid.linestyle"] = "solid"
self._rcParams["xtick.major.size"] = "0"
self._rcParams["xtick.minor.size"] = "0"
self._rcParams["xtick.major.pad"] = "6"
self._rcParams["xtick.minor.pad"] = "6"
self._rcParams["xtick.color"] = "#7F7F7F"
self._rcParams["xtick.direction"] = "out" # pointing out of axis
self._rcParams["ytick.major.size"] = "0"
self._rcParams["ytick.minor.size"] = "0"
self._rcParams["ytick.major.pad"] = "6"
self._rcParams["ytick.minor.pad"] = "6"
self._rcParams["ytick.color"] = "#7F7F7F"
self._rcParams["ytick.direction"] = "out" # pointing out of axis
self._rcParams["legend.fancybox"] = "True"
self._rcParams["figure.figsize"] = "11, 8"
self._rcParams["figure.facecolor"] = "1.0"
self._rcParams["figure.edgecolor"] = "0.50"
self._rcParams["figure.subplot.hspace"] = "0.5"
def apply_theme(self, ax):
'''Styles x,y axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have
been carried out (needs to know final tick spacing)
From: https://github.com/wrobstory/climatic/blob/master/climatic/stylers.py
'''
#Remove axis border
for child in ax.get_children():
if isinstance(child, mpl.spines.Spine):
child.set_alpha(0)
#Restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_markeredgewidth(1.4)
#Only show bottom left ticks
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
#Set minor grid lines
ax.grid(True, 'minor', color='#F2F2F2', linestyle='-', linewidth=0.7)
if not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
if not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
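# A minimal usage sketch (assumption: standalone matplotlib code, hypothetical
# helper name; not part of ggplot's public API).
def _theme_gray_sketch():  # pragma: no cover - sketch only
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    theme_gray().apply_theme(ax)
    return fig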
| bsd-2-clause |
simon-pepin/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
    multilabel : boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
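# Minimal round-trip sketch (shapes and file path are illustrative, not part of
# the library's documented examples):
#     X = np.random.rand(5, 3)
#     y = np.arange(5)
#     dump_svmlight_file(X, y, "data.svmlight", zero_based=False)
#     X2, y2 = load_svmlight_file("data.svmlight", zero_based=False)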
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/backends/backend_cairo.py | 1 | 16708 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
  * variables lowerUpper
* functions underscore_separated
"""
from __future__ import division
import os, sys, warnings, gzip
import numpy as np
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that pycairo is installed.")
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
from matplotlib import rcParams
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.dpi = dpi
self.gc = GraphicsContextCairo (renderer=self)
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
RendererBase.__init__(self)
def set_ctx_from_surface (self, surface):
self.gc.ctx = cairo.Context (surface)
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha*fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
@staticmethod
def convert_path(ctx, path, transform):
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
def draw_path(self, gc, path, transform, rgbFace=None):
if len(path.vertices) > 18980:
raise ValueError("The Cairo backend can not draw paths longer than 18980 points.")
ctx = gc.ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
ctx.new_path()
self.convert_path(ctx, path, transform)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha())
def draw_image(self, gc, x, y, im):
# bbox - not currently used
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
clippath, clippath_trans = gc.get_clip_path()
im.flipud_out()
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
# function does not pass a 'gc' so use renderer.ctx
ctx = self.gc.ctx
ctx.save()
if clippath is not None:
ctx.new_path()
RendererCairo.convert_path(ctx, clippath, clippath_trans)
ctx.clip()
y = self.height - y - rows
ctx.set_source_surface (surface, x, y)
ctx.paint()
ctx.restore()
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * np.pi / 180)
ctx.set_font_size (size)
ctx.show_text (s.encode("utf-8"))
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * np.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
ctx.show_text(s.encode("utf-8"))
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.gc.ctx.save()
self.gc._alpha = 1.0
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
return self.gc
def points_to_pixels(self, points):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
def restore(self):
self.ctx.restore()
def set_alpha(self, alpha):
GraphicsContextBase.set_alpha(self, alpha)
_alpha = self.get_alpha()
rgb = self._rgb
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], _alpha)
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
if not rectangle: return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
def set_clip_path(self, path):
if not path: return
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
RendererCairo.convert_path(ctx, tpath, affine)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
if dashes == None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash (
self.renderer.points_to_pixels (np.asarray(dashes)), offset)
def set_foreground(self, fg, isRGB=None):
GraphicsContextBase.set_foreground(self, fg, isRGB)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = w
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
    if _debug: print '%s()' % (_fn_name())
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasCairo(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def get_default_filetype(self):
return rcParams['cairo.format']
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not cairo.HAS_PS_SURFACE:
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not cairo.HAS_PDF_SURFACE:
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not cairo.HAS_SVG_SURFACE:
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
filename = fo
if is_string_like(fo):
fo = open(fo, 'wb')
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.gc.ctx
if orientation == 'landscape':
ctx.rotate (np.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
| gpl-2.0 |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/nltk/parse/dependencygraph.py | 5 | 31002 | # Natural Language Toolkit: Dependency Grammars
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Jason Narad <[email protected]>
# Steven Bird <[email protected]> (modifications)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Tools for reading and writing dependency trees.
The input is assumed to be in Malt-TAB format
(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).
"""
from __future__ import print_function, unicode_literals
from collections import defaultdict
from itertools import chain
from pprint import pformat
import subprocess
import warnings
from nltk.tree import Tree
from nltk.compat import python_2_unicode_compatible, string_types
#################################################################
# DependencyGraph Class
#################################################################
@python_2_unicode_compatible
class DependencyGraph(object):
"""
A container for the nodes and labelled edges of a dependency structure.
"""
def __init__(self, tree_str=None, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""Dependency graph.
We place a dummy `TOP` node with the index 0, since the root node is
often assigned 0 as its head. This also means that the indexing of the
nodes corresponds directly to the Malt-TAB format, which starts at 1.
        If zero_based is True, Malt-TAB-like input with node numbers starting
        at 0 and the root node assigned -1 (as produced by, e.g., zpar) is
        expected.
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
"""
self.nodes = defaultdict(lambda: {'address': None,
'word': None,
'lemma': None,
'ctag': None,
'tag': None,
'feats': None,
'head': None,
'deps': defaultdict(list),
'rel': None,
})
self.nodes[0].update(
{
'ctag': 'TOP',
'tag': 'TOP',
'address': 0,
}
)
self.root = None
if tree_str:
self._parse(
tree_str,
cell_extractor=cell_extractor,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
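    # Minimal construction sketch (toy sentence in 3-cell Malt-TAB layout,
    # word / tag / head; values are illustrative):
    #     dg = DependencyGraph('John N 2\nloves V 0\nMary N 2')
    #     dg.tree().pprint()        # -> (loves John Mary)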
def remove_by_address(self, address):
"""
Removes the node with the given address. References
to this node in others will still exist.
"""
del self.nodes[address]
def redirect_arcs(self, originals, redirect):
"""
Redirects arcs to any of the nodes in the originals list
to the redirect node address.
"""
for node in self.nodes.values():
new_deps = []
for dep in node['deps']:
if dep in originals:
new_deps.append(redirect)
else:
new_deps.append(dep)
node['deps'] = new_deps
def add_arc(self, head_address, mod_address):
"""
Adds an arc from the node specified by head_address to the
node specified by the mod address.
"""
relation = self.nodes[mod_address]['rel']
self.nodes[head_address]['deps'].setdefault(relation, [])
self.nodes[head_address]['deps'][relation].append(mod_address)
#self.nodes[head_address]['deps'].append(mod_address)
def connect_graph(self):
"""
Fully connects all non-root nodes. All nodes are set to be dependents
of the root node.
"""
for node1 in self.nodes.values():
for node2 in self.nodes.values():
if node1['address'] != node2['address'] and node2['rel'] != 'TOP':
relation = node2['rel']
node1['deps'].setdefault(relation, [])
node1['deps'][relation].append(node2['address'])
#node1['deps'].append(node2['address'])
def get_by_address(self, node_address):
"""Return the node with the given address."""
return self.nodes[node_address]
def contains_address(self, node_address):
"""
Returns true if the graph contains a node with the given node
address, false otherwise.
"""
return node_address in self.nodes
def to_dot(self):
"""Return a dot representation suitable for using with Graphviz.
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> print(dg.to_dot())
digraph G{
edge [dir=forward]
node [shape=plaintext]
<BLANKLINE>
0 [label="0 (None)"]
0 -> 2 [label="ROOT"]
1 [label="1 (John)"]
2 [label="2 (loves)"]
2 -> 1 [label=""]
2 -> 3 [label=""]
3 [label="3 (Mary)"]
}
"""
# Start the digraph specification
s = 'digraph G{\n'
s += 'edge [dir=forward]\n'
s += 'node [shape=plaintext]\n'
# Draw the remaining nodes
for node in sorted(self.nodes.values(), key=lambda v: v['address']):
s += '\n%s [label="%s (%s)"]' % (node['address'], node['address'], node['word'])
for rel, deps in node['deps'].items():
for dep in deps:
if rel is not None:
s += '\n%s -> %s [label="%s"]' % (node['address'], dep, rel)
else:
s += '\n%s -> %s ' % (node['address'], dep)
s += "\n}"
return s
def _repr_svg_(self):
"""Show SVG representation of the transducer (IPython magic).
>>> dg = DependencyGraph(
... 'John N 2\\n'
... 'loves V 0\\n'
... 'Mary N 2'
... )
>>> dg._repr_svg_().split('\\n')[0]
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
"""
dot_string = self.to_dot()
try:
process = subprocess.Popen(
['dot', '-Tsvg'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
except OSError:
raise Exception('Cannot find the dot binary from Graphviz package')
out, err = process.communicate(dot_string)
if err:
raise Exception(
'Cannot create svg representation by running dot from string: {}'
''.format(dot_string))
return out
def __str__(self):
return pformat(self.nodes)
def __repr__(self):
return "<DependencyGraph with {0} nodes>".format(len(self.nodes))
@staticmethod
def load(filename, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""
:param filename: a name of a file in Malt-TAB format
:param zero_based: nodes in the input file are numbered starting from 0
rather than 1 (as produced by, e.g., zpar)
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
:return: a list of DependencyGraphs
"""
with open(filename) as infile:
return [
DependencyGraph(
tree_str,
zero_based=zero_based,
cell_separator=cell_separator,
top_relation_label=top_relation_label,
)
for tree_str in infile.read().split('\n\n')
]
def left_children(self, node_index):
"""
Returns the number of left children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]['deps'].values())
index = self.nodes[node_index]['address']
return sum(1 for c in children if c < index)
def right_children(self, node_index):
"""
Returns the number of right children under the node specified
by the given address.
"""
children = chain.from_iterable(self.nodes[node_index]['deps'].values())
index = self.nodes[node_index]['address']
return sum(1 for c in children if c > index)
def add_node(self, node):
if not self.contains_address(node['address']):
self.nodes[node['address']].update(node)
def _parse(self, input_, cell_extractor=None, zero_based=False, cell_separator=None, top_relation_label='ROOT'):
"""Parse a sentence.
:param extractor: a function that given a tuple of cells returns a
7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,
rel``.
:param str cell_separator: the cell separator. If not provided, cells
are split by whitespace.
:param str top_relation_label: the label by which the top relation is
            identified, for example, `ROOT`, `null` or `TOP`.
"""
def extract_3_cells(cells, index):
word, tag, head = cells
return index, word, word, tag, tag, '', head, ''
def extract_4_cells(cells, index):
word, tag, head, rel = cells
return index, word, word, tag, tag, '', head, rel
def extract_7_cells(cells, index):
line_index, word, lemma, tag, _, head, rel = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, tag, tag, '', head, rel
def extract_10_cells(cells, index):
line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells
try:
index = int(line_index)
except ValueError:
# index can't be parsed as an integer, use default
pass
return index, word, lemma, ctag, tag, feats, head, rel
extractors = {
3: extract_3_cells,
4: extract_4_cells,
7: extract_7_cells,
10: extract_10_cells,
}
if isinstance(input_, string_types):
input_ = (line for line in input_.split('\n'))
lines = (l.rstrip() for l in input_)
lines = (l for l in lines if l)
cell_number = None
for index, line in enumerate(lines, start=1):
cells = line.split(cell_separator)
if cell_number is None:
cell_number = len(cells)
else:
assert cell_number == len(cells)
if cell_extractor is None:
try:
cell_extractor = extractors[cell_number]
except KeyError:
raise ValueError(
'Number of tab-delimited fields ({0}) not supported by '
'CoNLL(10) or Malt-Tab(4) format'.format(cell_number)
)
try:
index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells, index)
except (TypeError, ValueError):
# cell_extractor doesn't take 2 arguments or doesn't return 8
# values; assume the cell_extractor is an older external
# extractor and doesn't accept or return an index.
word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)
if head == '_':
continue
head = int(head)
if zero_based:
head += 1
self.nodes[index].update(
{
'address': index,
'word': word,
'lemma': lemma,
'ctag': ctag,
'tag': tag,
'feats': feats,
'head': head,
'rel': rel,
}
)
# Make sure that the fake root node has labeled dependencies.
if (cell_number == 3) and (head == 0):
rel = top_relation_label
self.nodes[head]['deps'][rel].append(index)
if self.nodes[0]['deps'][top_relation_label]:
root_address = self.nodes[0]['deps'][top_relation_label][0]
self.root = self.nodes[root_address]
self.top_relation_label = top_relation_label
else:
warnings.warn(
"The graph doesn't contain a node "
"that depends on the root element."
)
def _word(self, node, filter=True):
w = node['word']
if filter:
if w != ',':
return w
return w
def _tree(self, i):
""" Turn dependency graphs into NLTK trees.
:param int i: index of a node
:return: either a word (if the indexed node is a leaf) or a ``Tree``.
"""
node = self.get_by_address(i)
word = node['word']
deps = sorted(chain.from_iterable(node['deps'].values()))
if deps:
return Tree(word, [self._tree(dep) for dep in deps])
else:
return word
def tree(self):
"""
Starting with the ``root`` node, build a dependency tree using the NLTK
``Tree`` constructor. Dependency labels are omitted.
"""
node = self.root
word = node['word']
deps = sorted(chain.from_iterable(node['deps'].values()))
return Tree(word, [self._tree(dep) for dep in deps])
def triples(self, node=None):
"""
Extract dependency triples of the form:
((head word, head tag), rel, (dep word, dep tag))
"""
if not node:
node = self.root
head = (node['word'], node['ctag'])
for i in sorted(chain.from_iterable(node['deps'].values())):
dep = self.get_by_address(i)
yield (head, dep['rel'], (dep['word'], dep['ctag']))
for triple in self.triples(node=dep):
yield triple
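    # e.g. (sketch, using the toy graph from the constructor sketch above): calling
    #     list(dg.triples())
    # gives (('loves', 'V'), '', ('John', 'N')) and (('loves', 'V'), '', ('Mary', 'N'));
    # with 4-cell Malt-TAB input the middle element carries the dependency relation.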
def _hd(self, i):
try:
return self.nodes[i]['head']
except IndexError:
return None
def _rel(self, i):
try:
return self.nodes[i]['rel']
except IndexError:
return None
# what's the return type? Boolean or list?
def contains_cycle(self):
"""Check whether there are cycles.
>>> dg = DependencyGraph(treebank_data)
>>> dg.contains_cycle()
False
>>> cyclic_dg = DependencyGraph()
>>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}
>>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}
>>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}
>>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}
>>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}
>>> cyclic_dg.nodes = {
... 0: top,
... 1: child1,
... 2: child2,
... 3: child3,
... 4: child4,
... }
>>> cyclic_dg.root = top
>>> cyclic_dg.contains_cycle()
[3, 1, 2, 4]
"""
distances = {}
for node in self.nodes.values():
for dep in node['deps']:
key = tuple([node['address'], dep])
distances[key] = 1
for _ in self.nodes:
new_entries = {}
for pair1 in distances:
for pair2 in distances:
if pair1[1] == pair2[0]:
key = tuple([pair1[0], pair2[1]])
new_entries[key] = distances[pair1] + distances[pair2]
for pair in new_entries:
distances[pair] = new_entries[pair]
if pair[0] == pair[1]:
path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])
return path
return False # return []?
def get_cycle_path(self, curr_node, goal_node_index):
for dep in curr_node['deps']:
if dep == goal_node_index:
return [curr_node['address']]
for dep in curr_node['deps']:
path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)
if len(path) > 0:
path.insert(0, curr_node['address'])
return path
return []
def to_conll(self, style):
"""
The dependency graph in CoNLL format.
:param style: the style to use for the format (3, 4, 10 columns)
:type style: int
:rtype: str
"""
if style == 3:
template = '{word}\t{tag}\t{head}\n'
elif style == 4:
template = '{word}\t{tag}\t{head}\t{rel}\n'
elif style == 10:
template = '{i}\t{word}\t{lemma}\t{ctag}\t{tag}\t{feats}\t{head}\t{rel}\t_\t_\n'
else:
raise ValueError(
'Number of tab-delimited fields ({0}) not supported by '
'CoNLL(10) or Malt-Tab(4) format'.format(style)
)
return ''.join(template.format(i=i, **node) for i, node in sorted(self.nodes.items()) if node['tag'] != 'TOP')
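    # e.g. (sketch): for the toy graph above, to_conll(3) returns
    #     'John\tN\t2\nloves\tV\t0\nMary\tN\t2\n'
    # i.e. word, tag and head columns, one token per line.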
def nx_graph(self):
"""Convert the data in a ``nodelist`` into a networkx labeled directed graph."""
import networkx
nx_nodelist = list(range(1, len(self.nodes)))
nx_edgelist = [
(n, self._hd(n), self._rel(n))
for n in nx_nodelist if self._hd(n)
]
self.nx_labels = {}
for n in nx_nodelist:
self.nx_labels[n] = self.nodes[n]['word']
g = networkx.MultiDiGraph()
g.add_nodes_from(nx_nodelist)
g.add_edges_from(nx_edgelist)
return g
class DependencyGraphError(Exception):
"""Dependency graph exception."""
def demo():
malt_demo()
conll_demo()
conll_file_demo()
cycle_finding_demo()
def malt_demo(nx=False):
"""
A demonstration of the result of reading a dependency
version of the first sentence of the Penn Treebank.
"""
dg = DependencyGraph("""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
""")
tree = dg.tree()
tree.pprint()
if nx:
# currently doesn't work
import networkx
from matplotlib import pylab
g = dg.nx_graph()
g.info()
pos = networkx.spring_layout(g, dim=1)
networkx.draw_networkx_nodes(g, pos, node_size=50)
# networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)
networkx.draw_networkx_labels(g, pos, dg.nx_labels)
pylab.xticks([])
pylab.yticks([])
pylab.savefig('tree.png')
pylab.show()
def conll_demo():
"""
A demonstration of how to read a string representation of
a CoNLL format dependency tree.
"""
dg = DependencyGraph(conll_data1)
tree = dg.tree()
tree.pprint()
print(dg)
print(dg.to_conll(4))
def conll_file_demo():
print('Mass conll_read demo...')
graphs = [DependencyGraph(entry)
for entry in conll_data2.split('\n\n') if entry]
for graph in graphs:
tree = graph.tree()
print('\n')
tree.pprint()
def cycle_finding_demo():
dg = DependencyGraph(treebank_data)
print(dg.contains_cycle())
cyclic_dg = DependencyGraph()
cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0})
cyclic_dg.add_node({'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1})
cyclic_dg.add_node({'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2})
cyclic_dg.add_node({'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3})
cyclic_dg.add_node({'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4})
print(cyclic_dg.contains_cycle())
treebank_data = """Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
"""
conll_data1 = """
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
"""
conll_data2 = """1 Cathy Cathy N N eigen|ev|neut 2 su _ _
2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _
4 wild wild Adj Adj attr|stell|onverv 5 mod _ _
5 zwaaien zwaai N N soort|mv|neut 2 vc _ _
6 . . Punc Punc punt 5 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 met met Prep Prep voor 8 mod _ _
4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _
5 moeder moeder N N soort|ev|neut 3 obj1 _ _
6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _
7 gaan ga V V hulp|inf 6 vc _ _
8 winkelen winkel V V intrans|inf 11 cnj _ _
9 , , Punc Punc komma 8 punct _ _
10 zwemmen zwem V V intrans|inf 11 cnj _ _
11 of of Conj Conj neven 7 vc _ _
12 terrassen terras N N soort|mv|neut 11 cnj _ _
13 . . Punc Punc punt 12 punct _ _
1 Dat dat Pron Pron aanw|neut|attr 2 det _ _
2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _
5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _
6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _
7 . . Punc Punc punt 6 punct _ _
1 Het het Pron Pron onbep|neut|zelfst 2 su _ _
2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _
3 bij bij Prep Prep voor 2 ld _ _
4 de de Art Art bep|zijdofmv|neut 6 det _ _
5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _
6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _
7 die die Pron Pron betr|neut|zelfst 6 mod _ _
8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _
9 ginds ginds Adv Adv gew|aanw 12 mod _ _
10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _
11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _
12 gelaten laat V V trans|verldw|onverv 11 vc _ _
13 . . Punc Punc punt 12 punct _ _
1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _
2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _
3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _
4 naast naast Prep Prep voor 11 mod _ _
5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _
6 op op Prep Prep voor 11 ld _ _
7 de de Art Art bep|zijdofmv|neut 8 det _ _
8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _
9 kunnen kan V V hulp|inf 2 vc _ _
10 gaan ga V V hulp|inf 9 vc _ _
11 liggen lig V V intrans|inf 10 vc _ _
12 . . Punc Punc punt 11 punct _ _
1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _
2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _
3 mams mams N N soort|ev|neut 4 det _ _
4 rug rug N N soort|ev|neut 5 obj1 _ _
5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _
6 hebben heb V V hulp|inf 2 vc _ _
7 en en Conj Conj neven 0 ROOT _ _
8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _
9 de de Art Art bep|zijdofmv|neut 10 det _ _
10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _
11 . . Punc Punc punt 10 punct _ _
1 Of of Conj Conj onder|metfin 0 ROOT _ _
2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _
3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _
4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _
5 met met Prep Prep voor 10 mod _ _
6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _
7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _
8 rond rond Adv Adv deelv 10 svp _ _
9 kunnen kan V V hulp|inf 3 vc _ _
10 slenteren slenter V V intrans|inf 9 vc _ _
11 in in Prep Prep voor 10 mod _ _
12 de de Art Art bep|zijdofmv|neut 13 det _ _
13 buurt buurt N N soort|ev|neut 11 obj1 _ _
14 van van Prep Prep voor 13 mod _ _
15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _
16 . . Punc Punc punt 15 punct _ _
"""
if __name__ == '__main__':
demo()
| mit |
braghiere/JULESv4.6_clump | examples/tonzi_4.6/output/plotfapar.py | 2 | 8722 | from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import numpy as np
import datetime as dt # Python standard library datetime module
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
def ncdump(nc_fid, verb=True):
'''
ncdump outputs dimensions, variables and their attribute information.
The information is similar to that of NCAR's ncdump utility.
ncdump requires a valid instance of Dataset.
Parameters
----------
nc_fid : netCDF4.Dataset
A netCDF4 dateset object
verb : Boolean
whether or not nc_attrs, nc_dims, and nc_vars are printed
Returns
-------
nc_attrs : list
A Python list of the NetCDF file global attributes
nc_dims : list
A Python list of the NetCDF file dimensions
nc_vars : list
A Python list of the NetCDF file variables
'''
def print_ncattr(key):
"""
Prints the NetCDF file attributes for a given key
Parameters
----------
key : unicode
a valid netCDF4.Dataset.variables key
"""
try:
print "\t\ttype:", repr(nc_fid.variables[key].dtype)
for ncattr in nc_fid.variables[key].ncattrs():
print '\t\t%s:' % ncattr,\
repr(nc_fid.variables[key].getncattr(ncattr))
except KeyError:
print "\t\tWARNING: %s does not contain variable attributes" % key
# NetCDF global attributes
nc_attrs = nc_fid.ncattrs()
if verb:
print "NetCDF Global Attributes:"
for nc_attr in nc_attrs:
print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr))
nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions
# Dimension shape information.
if verb:
print "NetCDF dimension information:"
for dim in nc_dims:
print "\tName:", dim
print "\t\tsize:", len(nc_fid.dimensions[dim])
print_ncattr(dim)
# Variable information.
nc_vars = [var for var in nc_fid.variables] # list of nc variables
if verb:
print "NetCDF variable information:"
for var in nc_vars:
if var not in nc_dims:
print '\tName:', var
print "\t\tdimensions:", nc_fid.variables[var].dimensions
print "\t\tsize:", nc_fid.variables[var].size
print_ncattr(var)
return nc_attrs, nc_dims, nc_vars
my_example_nc_file = '/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/loobos.tstep_can_struc_a_1.nc'
my_example_nc_file_2 = '/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/loobos.tstep_can_struc_a_05.nc'
my_example_nc_file_3 = '/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/loobos.tstep_can_struc_a_1_half_lai.nc'
my_example_nc_file_4 = '/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/loobos.tstep_can_struc_a_025.nc'
my_example_nc_file_5 = '/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/loobos.tstep_can_struc_a_075.nc'
nc_fid = Dataset(my_example_nc_file, mode='r')
nc_fid_2 = Dataset(my_example_nc_file_2, mode='r')
nc_fid_3 = Dataset(my_example_nc_file_3, mode='r')
nc_fid_4 = Dataset(my_example_nc_file_4, mode='r')
nc_fid_5 = Dataset(my_example_nc_file_5, mode='r')
nc_attrs, nc_dims, nc_vars = ncdump(nc_fid)
# Extract data from NetCDF file
lats = nc_fid.variables['latitude'][:] # extract/copy the data
lons = nc_fid.variables['longitude'][:]
time = nc_fid.variables['time'][:]
fapar = nc_fid.variables['fapar'][:] # shape is time, lat, lon as shown above - fraction of absorbed PAR (fAPAR)
fapar_2 = nc_fid_2.variables['fapar'][:] # shape is time, lat, lon as shown above - fraction of absorbed PAR (fAPAR)
fapar_3 = nc_fid_3.variables['fapar'][:] # shape is time, lat, lon as shown above - fraction of absorbed PAR (fAPAR)
fapar_4 = nc_fid_4.variables['fapar'][:] # shape is time, lat, lon as shown above - fraction of absorbed PAR (fAPAR)
fapar_5 = nc_fid_5.variables['fapar'][:] # shape is time, lat, lon as shown above - fraction of absorbed PAR (fAPAR)
print dt.timedelta(hours=np.float64(24))
time_idx = 100 # some arbitrary time index in the record
# Python and the reanalysis are slightly off in time so this fixes that problem
#offset = dt.timedelta(hours=t3)
offset = dt.timedelta(hours=np.float64(24))
# List of all times in the file as datetime objects
dt_time = [dt.date(1997, 1, 1) + dt.timedelta(hours=np.float64(t)) - offset\
for t in time]
cur_time = dt_time[time_idx]
# Plot of global temperature on our random day
#>>>>>fig = plt.figure()
#>>>>>fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
# Setup the map. See http://matplotlib.org/basemap/users/mapsetup.html
# for other projections.
#>>>>>m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
#>>>>> llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
#>>>>>m.drawcoastlines()
#>>>>>m.drawmapboundary()
# Make the plot continuous
#>>>>>air_cyclic, lons_cyclic = addcyclic(air[time_idx, :, :], lons)
# Shift the grid so lons go from -180 to 180 instead of 0 to 360.
#>>>>>air_cyclic, lons_cyclic = shiftgrid(180., air_cyclic, lons_cyclic, start=False)
# Create 2D lat/lon arrays for Basemap
#>>>>>lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
# Transforms lat/lon into plotting coordinates for projection
#>>>>>x, y = m(lon2d, lat2d)
# Plot of air temperature with 11 contour intervals
#>>>>>cs = m.contourf(x, y, air_cyclic, 11, cmap=plt.cm.Spectral_r)
#>>>>>cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
#>>>>>cbar.set_label("%s (%s)" % (nc_fid.variables['air'].var_desc,\
#>>>>> nc_fid.variables['air'].units))
#>>>>>plt.title("%s on %s" % (nc_fid.variables['air'].var_desc, cur_time))
# Writing NetCDF files
# For this example, we will create two NetCDF4 files. One with the global air
# temperature departure from its value at Darwin, Australia. The other with
# the temperature profile for the entire year at Darwin.
darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83}
# Find the nearest latitude and longitude for Darwin
lat_idx = np.abs(lats - darwin['lat']).argmin()
lon_idx = np.abs(lons - darwin['lon']).argmin()
# Simple example: temperature profile for the entire year at Darwin.
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
#>>>>>w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4')
#>>>>>w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\
#>>>>> (nc_fid.variables['air'].var_desc.lower(),\
#>>>>> darwin['name'], nc_fid.description)
# Using our previous dimension info, we can create the new time dimension
# Even though we know the size, we are going to set the size to unknown
#>>>>>w_nc_fid.createDimension('time', None)
#>>>>>w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\
#>>>>> ('time',))
# You can do this step yourself but someone else did the work for us.
#>>>>>for ncattr in nc_fid.variables['time'].ncattrs():
#>>>>> w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
#>>>>>w_nc_fid.variables['time'][:] = time
#>>>>>w_nc_var = w_nc_fid.createVariable('air', 'f8', ('time'))
#>>>>>w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\
#>>>>> 'units': u"degK", 'level_desc': u'Surface',\
#>>>>> 'var_desc': u"Air temperature",\
#>>>>> 'statistic': u'Mean\nM'})
#>>>>>w_nc_fid.variables['air'][:] = air[time_idx, lat_idx, lon_idx]
#>>>>>w_nc_fid.close() # close the new file
#plt.plot(dt_time, gpp_2[:, lat_idx, lon_idx], c='b', marker='o',label='PFT NEE - a = 0.5')
plt.plot(dt_time, fapar[:, lat_idx, lon_idx], c='k', marker='o',label='a = 1.0, LAI = LAI')
plt.plot(dt_time, fapar_5[:, lat_idx, lon_idx], c='y', marker='o',label='a = 0.75, LAI = LAI')
plt.plot(dt_time, fapar_2[:, lat_idx, lon_idx], c='r', marker='o',label='a = 0.5, LAI = LAI')
plt.plot(dt_time, fapar_4[:, lat_idx, lon_idx], c='b', marker='o',label='a = 0.25, LAI = LAI')
plt.plot(dt_time, fapar_3[:, lat_idx, lon_idx], c='g', marker='_',label='a = 0.5, LAI = LAI/2')
plt.xlabel("Time in seconds since 1997-01-01 00:00:00")
plt.ylabel("fAPAR")
plt.title("Loobos Flux site")
plt.grid()
plt.legend(loc="best")
plt.show()
#plt.savefig("/home/mn811042/jules4.x/4.6/trunk/examples/point_loobos/output/fapar_test_karina_code.png")
# Close original NetCDF file.
nc_fid.close()
| gpl-2.0 |
eickenberg/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (which needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
developerator/Maturaarbeit | GAN-TransferLearning/Blondes32/Blondes32_Transfer_dcgan.py | 1 | 5452 | '''
By Tim Ehrensberger
The base of the functions for the network's training is taken from https://github.com/Zackory/Keras-MNIST-GAN/blob/master/mnist_gan.py by Zackory Erickson
'''
import os
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from keras.layers import Input, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D
from keras.models import Model, Sequential, load_model
from keras.layers.core import Reshape, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, UpSampling2D
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.regularizers import l1_l2
#------
# DATA
#------
from keras import backend as K
K.set_image_dim_ordering('th')
import h5py
# Get hdf5 file
# Please read the README_Info in GAN-TransferLearning for information about how to get the dataset Blondies32_Transfer.h5
hdf5_file = os.path.join("PATH TO DATASET", "Blondies32_Transfer.h5")
with h5py.File(hdf5_file, "r") as hf:
X_train = hf["data"] [()] #[()] makes it read the file into one array
X_train = X_train.astype(np.float32) / 255
#----------------
# HYPERPARAMETERS
#----------------
randomDim = 100
adam = Adam(lr=0.0002, beta_1=0.5)
reg = lambda: l1_l2(l1=1e-7, l2=1e-7)
# Load the old models
# Please read the README_Info in GAN-TransferLearning for information about how to get the weight-files below
old_discriminator = 'dcgan32_discriminator_transfer.h5'
old_generator = 'dcgan32_generator_transfer.h5'
# There is a strange bug if the optimizer state is loaded from the previous network, therefore just delete it
with h5py.File(old_generator, 'a') as f:
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
with h5py.File(old_discriminator, 'a') as f:
if 'optimizer_weights' in f.keys():
del f['optimizer_weights']
generator = load_model(old_generator)
discriminator = load_model(old_discriminator)
#-----
# GAN
#-----
discriminator.trainable = False
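# Note: freezing the discriminator here only affects the stacked `gan` model
# compiled below; the standalone discriminator is still updated directly via its
# own train_on_batch call in the training loop, which re-enables trainable there.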
ganInput = Input(shape=(randomDim,))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)
#-----------
# FUNCTIONS
#-----------
def plotLoss(epoch):
assertExists('images')
plt.figure(figsize=(10, 8))
plt.plot(dLosses, label='Discriminative loss')
plt.plot(gLosses, label='Generative loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.savefig('images/dcgan_loss_epoch_%d.png' % epoch)
# Create a wall of generated images
examples=100
noise = np.random.normal(0, 1, size=[examples, randomDim])
def plotGeneratedImages(epoch, dim=(10, 10), figsize=(10, 10)):
generatedImages = generator.predict(noise)
generatedImages = generatedImages.transpose(0, 2, 3, 1)
assertExists('images')
plt.figure(figsize=figsize)
for i in range(generatedImages.shape[0]):
plt.subplot(dim[0], dim[1], i+1)
plt.imshow(generatedImages[i, :, :, :], interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig('images/transfer_dcgan_generated_image_epoch_%d.png' % epoch)
# Save the generator and discriminator networks (and weights) for later use
def savemodels(epoch):
assertExists('models')
generator.save('models/transfer_dcgan_generator_epoch_%d.h5' % epoch)
discriminator.save('models/transfer_dcgan_discriminator_epoch_%d.h5' % epoch)
dLosses = []
gLosses = []
def train(epochs=1, batchSize=128, save_interval=1, start_at=1):
batchCount = X_train.shape[0] // batchSize
print('Epochs:', epochs)
print('Batch size:', batchSize)
print('Batches per epoch:', batchCount)
#plot once before training
plotGeneratedImages(0)
for e in range(start_at, epochs+1):
print('-'*15, 'Epoch %d' % e, '-'*15)
for _ in tqdm(range(batchCount)):
# Get a random set of input noise and images
noise = np.random.normal(0, 1, size=[batchSize, randomDim])
imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=batchSize)]
# Generate fake images
generatedImages = generator.predict(noise)
X = np.concatenate([imageBatch, generatedImages])
# Labels for generated and real data
yDis = np.zeros(2*batchSize)
# One-sided label smoothing = not exactly 1
yDis[:batchSize] = 0.95
# Train discriminator
discriminator.trainable = True
dloss = discriminator.train_on_batch(X, yDis) # here only D is trained
# Train generator
noise = np.random.normal(0, 1, size=[batchSize, randomDim])
yGen = np.ones(batchSize)
discriminator.trainable = False
gloss = gan.train_on_batch(noise, yGen) # here only G is trained because D is not trainable
# Store loss of most recent batch from this epoch
dLosses.append(dloss)
gLosses.append(gloss)
#plot after specified number of epochs
if (e == 1 or e % save_interval == 0):
plotGeneratedImages(e)
savemodels(e)
# Plot losses from all epochs
plotLoss(e)
def assertExists(path):
if not os.path.exists(path):
os.makedirs(path)
if __name__ == '__main__':
train(100, 64, 1)
| mit |
lukasmerten/GitPlayground | UsefulPythonScripts/Ferrie2007_Innen.py | 1 | 3648 | import numpy as np
import matplotlib.pyplot as plt
import pylab
import scipy.integrate as integrate
x= -500
y= -500
z = 10
# Constants for the CMZ
xc =-50 # position of the centre in general coordinates
yc = 50
TettaC = 70
# Constants for the DISK
alpha = 13.5
beta = 20.
TettaD = 48.5
# Dimensions in CMZ coordinates
XMAX=250
XC = XMAX/2
LC = XMAX/(2*np.log(2)**0.25)
HC = 18.
HC2 = 54.
# Dimensions in DISK coordinates
XD = 1200
LD = 438.
HD = 42.
HD2 = 120.
# Constants for HII -WIM-
y3 = -10
z3= -20
L3 = 145.
H3 = 26.
L2 = 3700.
H2 = 140.
L1 = 17000
H1=950.
# Constants for HII -VHIM-
alphaVH = 21
LVH=162
HVH = 90
def Bogenmass(x): # convert an angle from degrees to radians for the calculations
return x*np.pi/180
def cos(x): # cosine for angles given in degrees
x=Bogenmass(x)
return np.cos(x)
def sin(x): # sine for angles given in degrees
x=Bogenmass(x)
return np.sin(x)
def sech2(x):
    return 1./np.cosh(x)**2
def u(x): # unit step: 0 where x < 0, else 1
    if np.all(x < 0):
return 0
else:
return 1
def CMZ_X_Trafo(x,y):
return (x-xc)*cos(TettaC) +(y-yc)*sin(TettaC)
def CMZ_Y_Trafo(x,y):
return -(x-xc)*sin(TettaC) +(y-yc)*cos(TettaC)
def DISK_X_Trafo(x,y,z):
return x*cos(beta)*cos(TettaD) - y*(sin(alpha)*sin(beta)*cos(TettaD) -cos(alpha)*sin(TettaD))-z*(cos(alpha)*sin(beta)*cos(TettaD) +sin(alpha)*sin(TettaD))
def DISK_Y_Trafo(x,y,z):
xT= x*cos(beta)*sin(TettaD)
yT = y*(sin(alpha)*sin(beta)*sin(TettaD) +cos(alpha)*cos(TettaD))
zT = z*(cos(alpha)*sin(beta)*sin(TettaD) -sin(alpha)*sin(TettaD))
return -xT+yT+zT
def DISK_Z_Trafo(x,y,z):
xT = x*sin(beta)
yT = y*sin(alpha)*cos(beta)
zT = z*cos(alpha)*cos(beta)
return xT+yT+zT
# Molecular hydrogen in the CMZ
def n_H2_CMZ(x0,y0,z0): # input in the original coordinates
x = CMZ_X_Trafo(x0,y0)
y = CMZ_Y_Trafo(x0,y0)
XY_Help = ((np.sqrt(x**2+(2.5*y)**2)-XC)/LC)**4
return 150*np.exp(-XY_Help)*np.exp(-(z0/HC)**2)
# Atomic hydrogen in the CMZ
def n_HI_CMZ(x0,y0,z0): # input in the original coordinates
x=CMZ_X_Trafo(x0,y0)
y=CMZ_Y_Trafo(x0,y0)
A=np.sqrt(x**2 +(2.5*y)**2)
B= (A-XC)/LC
XY_Help=B**4
Z = (z0/HC2)**2
return 8.8*np.exp(-XY_Help)*np.exp(-Z)
# Molecular hydrogen in the DISK
def n_H2_DISK(x0,y0,z0):
x= DISK_X_Trafo(x0,y0,z0)
y= DISK_Y_Trafo(x0,y0,z0)
z=DISK_Z_Trafo(x0,y0,z0)
return 4.8*np.exp(-((np.sqrt(x**2 + (3.1*y)**2) - XD)/LD)**4)*np.exp(-(z/HD)**2)
# Atomic hydrogen in the DISK
def n_HI_DISK(x0,y0,z0):
x= DISK_X_Trafo(x0,y0,z0)
y= DISK_Y_Trafo(x0,y0,z0)
z=DISK_Z_Trafo(x0,y0,z0)
return 0.34*np.exp(-((np.sqrt(x**2 + (3.1*y)**2) - XD)/LD)**4)*np.exp(-(z/HD2)**2)
# Ionised hydrogen
def n_HII_WIM(x0,y0,z0):
r=np.sqrt(x0**2+y0**2+z0**2)
    P1 = np.exp(-(x0**2+(y0-y3)**2)/L3**2)*np.exp(-(z0-z3)**2/H3**2)
    P2 = np.exp(-((r-L2)/(0.5*L2))**2)*sech2(z0/H2)
    P3 = np.cos(np.pi*r*0.5/L1)*sech2(z0/H1)
return 8.0*(P1+0.009*P2+0.005*P3)
def n_HII_VHIM(x0,y0,z0):
e = y0*cos(alphaVH)+z0*sin(alphaVH)
    s = -y0*sin(alphaVH) + z0*cos(alphaVH)
return 0.29*np.exp(-((x0**2+e**2)/LVH**2 + s**2/HVH**2))
def n_HII(x0,y0,z0):
return n_HII_VHIM(x0,y0,z0) +n_HII_WIM(x0,y0,z0)
def n_HI(x,y,z):
return n_HI_DISK(x,y,z) + n_HI_CMZ(x,y,z)
def n_H2(x,y,z):
return n_H2_CMZ(x,y,z) + n_H2_DISK(x,y,z)
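# e.g. (sketch): n_H2(0., 0., 0.) evaluates the combined CMZ + disk molecular
# hydrogen density at the grid origin (units follow the Ferriere 2007
# parameterisation, presumably cm^-3).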
x = pylab.linspace(-100,100,200)
y = pylab.linspace(-100,100,200)
# define 2D arrays
xx,yy = pylab.meshgrid(x,y)
# fill in the data
zz = pylab.zeros(xx.shape)
for i in range(xx.shape[0]):
for j in range(xx.shape[1]):
zz[i,j] = n_H2(xx[i,j], yy[i,j],0)
# plot
plt.figure()
plt.title('Massdistribution for H2')
plt.pcolormesh(xx,yy,zz)
plt.colorbar()
plt.contour(xx,yy,zz)
plt.gca().set_aspect("equal")
plt.xlabel('x/pc')
plt.ylabel('y/pc')
plt.show()
| mit |
wuxue/altanalyze | misopy/sashimi_plot/Sashimi.py | 2 | 4558 | ##
## Class for representing figures
##
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
import string
import misopy.sashimi_plot.plot_utils.plot_settings as plot_settings
import misopy.sashimi_plot.plot_utils.plotting as plotting
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = 'Arial'
class Sashimi:
"""
Representation of a figure.
"""
def __init__(self, label, output_dir, dimensions=None, png=False,
output_filename=None, settings_filename=None,
event=None, chrom=None, no_posteriors=False):
"""
Initialize image settings.
"""
self.output_ext = ".pdf"
if png:
self.output_ext = ".png"
# Plot label, will be used in creating the plot
# output filename
self.label = label
# Set output directory
self.set_output_dir(output_dir)
# Plot settings
self.settings_filename = settings_filename
if self.settings_filename != None:
self.settings = plot_settings.parse_plot_settings(settings_filename,
event=event,
chrom=chrom,
no_posteriors=no_posteriors)
else:
# Load default settings if no settings filename was given
self.settings = plot_settings.get_default_settings()
if output_filename != None:
# If explicit output filename is given to us, use it
self.output_filename = output_filename
else:
# Otherwise, use the label and the output directory
self.set_output_filename()
if dimensions != None:
self.dimensions = dimensions
else:
fig_height = self.settings["fig_height"]
fig_width = self.settings["fig_width"]
#print "Reading dimensions from settings..."
#print " - Height: %.2f" %(float(fig_height))
#print " - Width: %.2f" %(float(fig_width))
self.dimensions = [fig_width, fig_height]
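    # Minimal usage sketch (label and output directory are illustrative):
    #     s = Sashimi("my_event", "/tmp/sashimi", png=True)
    #     s.setup_figure()
    #     # ... draw onto the current matplotlib figure ...
    #     s.save_plot()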
def set_output_dir(self, output_dir):
self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
def set_output_filename(self):
plot_basename = "%s%s" %(self.label, self.output_ext)
self.output_filename = os.path.join(self.output_dir, plot_basename)
def setup_figure(self):
#print "Setting up plot using dimensions: ", self.dimensions
plt.figure(figsize=self.dimensions)
# If asked, use sans serif fonts
font_size = self.settings["font_size"]
if self.settings["sans_serif"]:
#print "Using sans serif fonts."
plotting.make_sans_serif(font_size=font_size)
def save_plot(self, plot_label=None,show=False):
"""
Save plot to the output directory. Determine
the file type.
"""
if self.output_filename == None:
raise Exception, "sashimi_plot does not know where to save the plot."
output_fname = None
if plot_label is not None:
# Use custom plot label if given
            ext = self.output_filename.rsplit(".", 1)[1]
dirname = os.path.dirname(self.output_filename)
output_fname = \
                os.path.join(dirname, "%s.%s" %(plot_label, ext))
else:
output_fname = self.output_filename
### determine whether to show the plot interactively, using a parameter file
try:
s = open(string.split(output_fname,'SashimiPlots')[0]\
+'SashimiPlots/show.txt','r')
show_param=s.read()
except Exception: show_param = 'False'
print '.',
#print "Saving plot to: %s" %(output_fname)
#output_fname2=output_fname.replace(".pdf")
plt.savefig(output_fname) ### An error here appears to be due to an issue with one of the BAM files (can't print out the bam file names in plot_gene.py)
### Write out a png as well
output_fname = string.replace(output_fname,'.pdf','.png')
plt.savefig(output_fname,dpi=120)
if 'TRUE' in show_param:
plt.show()
plt.clf()
else:
plt.clf()
plt.close() ### May result in TK associated errors later on | apache-2.0 |
garnachod/SimpleDoc2Vec | doc2vec.py | 1 | 3551 | # gensim modules
from gensim import utils
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
from collections import namedtuple
import time
import random
from blist import blist
# numpy
import numpy as np
# classifier
from sklearn.linear_model import LogisticRegression
class LabeledSentenceMio(namedtuple('LabeledSentenceMio', 'words tags')):
def __new__(cls, words, tags):
# add default values
return super(LabeledSentenceMio, cls).__new__(cls, words, tags)
'''
class LabeledSentenceMio(namedtuple):
"""docstring for LabeledSentenceMio"""
def __init__(self, words=None, tags=None):
super(LabeledSentenceMio, self).__init__()
self.words = words
self.tags = tags
'''
class LabeledLineSentence(object):
def __init__(self, sources):
self.sources = sources
self.sentences = None
flipped = {}
# make sure that keys are unique
for key, value in sources.items():
if value not in flipped:
flipped[value] = [key]
else:
raise Exception('Non-unique prefix encountered')
def to_array(self):
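        # Build the TaggedDocument list only once and cache it on the object;
        # sentences_perm() below reshuffles this cached list in place.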
if self.sentences is None:
self.sentences = blist()
for source, prefix in self.sources.items():
with utils.smart_open(source) as fin:
for item_no, line in enumerate(fin):
line = line.replace("\n", "")
self.sentences.append(TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no]))
return self.sentences
def sentences_perm(self):
random.shuffle(self.sentences)
return self.sentences
if __name__ == '__main__':
sources = {'data/trainneg.txt':'TRAIN_NEG', 'data/trainpos.txt':'TRAIN_POS', 'data/trainunsup.txt':'TRAIN_UNSP'}
dimension = 100
total_start = time.time()
sentences = LabeledLineSentence(sources)
dbow = True
if dbow:
model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, dm=0 ,workers=6, alpha=0.04)
print "inicio vocab"
model.build_vocab(sentences.to_array())
print "fin vocab"
first_alpha = model.alpha
last_alpha = 0.01
next_alpha = first_alpha
epochs = 30
for epoch in range(epochs):
start = time.time()
print "iniciando epoca DBOW:"
print model.alpha
model.train(sentences.sentences_perm())
end = time.time()
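            # Decay the learning rate linearly from first_alpha to last_alpha
            # over the epochs, instead of letting gensim manage it internally.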
next_alpha = (((first_alpha - last_alpha) / float(epochs)) * float(epochs - (epoch+1)) + last_alpha)
model.alpha = next_alpha
print "tiempo de la epoca " + str(epoch) +": " + str(end - start)
model.save('./imdb_dbow.d2v')
dm = True
if dm:
#model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, workers=6, dm_mean=1, alpha=0.04)
model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, workers=6, alpha=0.04)
#model = Doc2Vec(min_count=1, window=10, size=dimension, sample=1e-3, negative=5, workers=6, alpha=0.04, dm_concat=1)
#
print "inicio vocab"
model.build_vocab(sentences.to_array())
print "fin vocab"
first_alpha = model.alpha
last_alpha = 0.01
next_alpha = first_alpha
epochs = 30
for epoch in range(epochs):
start = time.time()
print "iniciando epoca DM:"
print model.alpha
model.train(sentences.sentences_perm())
end = time.time()
next_alpha = (((first_alpha - last_alpha) / float(epochs)) * float(epochs - (epoch+1)) + last_alpha)
model.alpha = next_alpha
print "tiempo de la epoca " + str(epoch) +": " + str(end - start)
model.save('./imdb_dm.d2v')
total_end = time.time()
print "tiempo total:" + str((total_end - total_start)/60.0) | gpl-2.0 |
rbiswas4/AnalyzeSN | analyzeSN/cov_utils.py | 3 | 5874 | #!/usr/bin/env python
"""
A number of utility functions to conventientyly deal with covariances
- generateCov : function to generate random covariances as `np.ndarray`
- covariance : dress up `np.ndarray` covariances as `pd.DataFrames`
- subcovariance : extract subcovariance for two indexes or parameter names
- log_covariance : cov(log(x), params) given cov(x, params) in linear approx.
- expAVsquare : < (A V)^2 > given Cov, where A is const, V ~ N(0, Cov)
"""
import numpy as np
import pandas as pd
# from copy import deepcopy
__all__ = ['expAVsquare', 'log_covariance', 'subcovariance', 'covariance',
'generateCov']
def expAVsquare(covV, A):
"""
Return the expectation of (A^T V)^2 where A is a constant vector and V is
a random vector V ~ N(0., covV) by computing A^T * covV * A
Parameters
----------
covV : `np.ndarray`, mandatory
A : `np.array`, mandatory
vector of constants.
Returns
-------
float variance (scalar)
"""
va = np.sum(covV* A, axis=1)
var = np.sum(A * va, axis=0)
return var
def log_covariance(covariance, paramName, paramValue, factor=1.):
"""
Covariance of the parameters with parameter paramName replaced by
factor * np.log(param) everywhere, and its true value is paramValue,
assuming linear propagation
Parameters
----------
covariance : `pandas.DataFrame`, mandatory
representing covariance matrix
paramName : int or str, mandatory
integer or parameter name specifying the position of the variable
whose logarithm must be taken
paramValue : float, mandatory
true/estimated value of the variable itself
factor : float, optional, defaults to 1.
Factor multiplying the natural logarithm. For example,
if the relevant transformation is going from 'f' to
-2.5 log10(f), the factor should be -2.5 /np.log(10)
    Returns
    -------
    the covariance with the row and column for `paramName` rescaled by
    factor / paramValue, in the same container type as the input
    (`numpy.ndarray` for an integer paramName, `pandas.DataFrame` otherwise).
Examples
--------
"""
if isinstance(paramName, np.int):
cov = covariance.values
cov[:, paramName] = factor * cov[:, paramName] / paramValue
cov[paramName, :] = factor * cov[paramName, :] / paramValue
return cov
covariance[paramName] = factor * covariance[paramName] / paramValue
covariance.loc[paramName] = factor * covariance.loc[paramName] / paramValue
return covariance
def subcovariance(covariance, paramList, array=False):
"""
returns the covariance of a subset of parameters in a covariance dataFrame.
Parameters
----------
covariance : `pandas.DataFrame` representing square covariance matrix
with parameters as column names, and index as returned by covariance
paramList : list of strings, mandatory
list of parameters for which the subCovariance matrix is desired.
The set of parameters in paramList must be a subset of the columns
and indices of covariance
array : boolean, optional, defaults to False
if true, return `numpy.ndarray`, if False return `pandas.DataFrame`
    Returns
    -------
    the covariance restricted to the rows and columns in `paramList`, as a
    `pandas.DataFrame`, or as a `numpy.ndarray` if array=True.
"""
df = covariance.ix[paramList, paramList]
if array:
return df.values
else:
return df
def covariance(covArray, paramNames=None, normalized=False):
"""
converts a covariance matrix in `numpy.ndarray` to a
`pandas.DataFrame`. If paramNames is not None, then the dataframe
is indexed by the parameter names, and has columns corresponding
to the parameter names enabling easy access by index or names.
Parameters
----------
covArray : `numpy.ndarray` of the covariance, mandatory
paramNames : iterable of strings, optional, defaults to None
normalized : Bool, optional, defaults to False
whether to return the normalized covariance matrix
Returns
-------
a `pandas.DataFrame` with column names and indexes given by the parameter
names. If paramNames is None, the return is a DataFrame with indexes and
column names chosen by pandas.
Examples
--------
    >>> cov = covariance(covArray, paramNames=['t0', 'x0', 'x1', 'c'])
>>> cov.ix[['t0', 'x1'],['t0', 'x1']]
>>> cov.iloc[[0, 2], [0, 2]]
"""
l, w = np.shape(covArray)
# Check for the covariance matrix being square, not checking for symmetry
if l != w:
raise ValueError('The covariance matrix is not square; length!=width')
if paramNames is not None:
if len(paramNames) != w:
raise ValueError('The number of parameters must match the length'
' of the covariance matrix')
cov = pd.DataFrame(covArray, columns=paramNames, index=paramNames)
else:
cov = pd.DataFrame(covArray)
if not normalized:
return cov
# normalize if requested
stds = cov.values.diagonal()
for i, col in enumerate(cov.columns):
cov[col] = cov[col]/stds[i]
for i in range(len(cov)):
cov.iloc[i] = cov.iloc[i]/stds[i]
return cov
def generateCov(dims, seed=None, low=-0.5, high=0.5):
"""
generate a 2D semi-positive definite matrix of size dimsXdims. While
this will create different random matrices, the exact distribution of
the matrices has not been checked.
Parameters
----------
dims : integer, mandatory
size of the matrix
seed : integer, optional, defaults to None
sets the seed of the random number generator. If None,
numpy chooses the seed.
    low : float, optional, defaults to -0.5
Entries are x * y, and the smallest value for x, or y is low
    high : float, optional, defaults to 0.5
Entries are x * y, and the largest value for x, or y is high
"""
if seed is not None:
np.random.seed(seed)
x = np.random.uniform(low, high, size=dims)
y = np.random.uniform(low, high,size=dims)
m = np.outer(x, y)
return np.dot(m, m.transpose())
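if __name__ == '__main__':
    # Minimal illustration (not part of the module API) of how these helpers
    # chain together; the parameter names and values below are hypothetical.
    cov = covariance(generateCov(4, seed=0), paramNames=['t0', 'x0', 'x1', 'c'])
    sub = subcovariance(cov, ['t0', 'x1'])
    logcov = log_covariance(cov.copy(), 'x0', 1.0e-5, factor=-2.5 / np.log(10.))
    var = expAVsquare(cov.values, np.array([1., 0., 0., 0.]))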
| mit |
yunfeilu/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
xyguo/scikit-learn | setup.py | 25 | 11732 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable, os.path.join(cwd,
'build_tools',
'cythonize.py'),
'sklearn'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required, nor Cythonization
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
hlin117/scikit-learn | sklearn/preprocessing/tests/test_label.py | 40 | 18519 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert_true(issparse(got))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert_equal(got.indices.dtype, got.indptr.dtype)
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert_equal(got.indices.dtype, got.indptr.dtype)
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/tseries/util.py | 9 | 3286 | import warnings
from pandas.compat import lrange
import numpy as np
from pandas.core.dtypes.common import _ensure_platform_int
from pandas.core.frame import DataFrame
import pandas.core.algorithms as algorithms
def pivot_annual(series, freq=None):
"""
Deprecated. Use ``pivot_table`` instead.
Group a series by years, taking leap years into account.
The output has as many rows as distinct years in the original series,
and as many columns as the length of a leap year in the units corresponding
to the original frequency (366 for daily frequency, 366*24 for hourly...).
    The first column of the output corresponds to Jan. 1st, 00:00:00,
while the last column corresponds to Dec, 31st, 23:59:59.
Entries corresponding to Feb. 29th are masked for non-leap years.
For example, if the initial series has a daily frequency, the 59th column
of the output always corresponds to Feb. 28th, the 61st column to Mar. 1st,
and the 60th column is masked for non-leap years.
    With an hourly initial frequency, the (59*24)th column of the output always
    corresponds to Feb. 28th 23:00, the (61*24)th column to Mar. 1st, 00:00, and
the 24 columns between (59*24) and (61*24) are masked.
If the original frequency is less than daily, the output is equivalent to
``series.convert('A', func=None)``.
Parameters
----------
series : Series
freq : string or None, default None
Returns
-------
annual : DataFrame
"""
msg = "pivot_annual is deprecated. Use pivot_table instead"
warnings.warn(msg, FutureWarning)
index = series.index
year = index.year
years = algorithms.unique1d(year)
if freq is not None:
freq = freq.upper()
else:
freq = series.index.freq
if freq == 'D':
width = 366
offset = np.asarray(index.dayofyear) - 1
# adjust for leap year
offset[(~isleapyear(year)) & (offset >= 59)] += 1
columns = lrange(1, 367)
# todo: strings like 1/1, 1/25, etc.?
elif freq in ('M', 'BM'):
width = 12
offset = np.asarray(index.month) - 1
columns = lrange(1, 13)
elif freq == 'H':
width = 8784
grouped = series.groupby(series.index.year)
defaulted = grouped.apply(lambda x: x.reset_index(drop=True))
defaulted.index = defaulted.index.droplevel(0)
offset = np.asarray(defaulted.index)
offset[~isleapyear(year) & (offset >= 1416)] += 24
columns = lrange(1, 8785)
else:
raise NotImplementedError(freq)
flat_index = (year - years.min()) * width + offset
flat_index = _ensure_platform_int(flat_index)
values = np.empty((len(years), width))
values.fill(np.nan)
values.put(flat_index, series.values)
return DataFrame(values, index=years, columns=columns)
def isleapyear(year):
"""
Returns true if year is a leap year.
Parameters
----------
year : integer / sequence
A given (list of) year(s).
"""
msg = "isleapyear is deprecated. Use .is_leap_year property instead"
warnings.warn(msg, FutureWarning)
year = np.asarray(year)
return np.logical_or(year % 400 == 0,
np.logical_and(year % 4 == 0, year % 100 > 0))
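if __name__ == '__main__':
    # Illustrative only: pivot two years of daily data into a year x day-of-year
    # table (366 columns, with Feb. 29th masked for the non-leap year).
    import pandas as pd
    ts = pd.Series(np.arange(731.),
                   index=pd.date_range('2011-01-01', periods=731, freq='D'))
    annual = pivot_annual(ts, freq='D')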
| gpl-2.0 |
chrsrds/scikit-learn | examples/exercises/plot_cv_digits.py | 24 | 1175 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
X, y = datasets.load_digits(return_X_y=True)
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
thypad/brew | test/test_selection_dynamic.py | 2 | 3344 | """
Tests for `brew.selection.dynamic` module. """
import numpy as np
import sklearn
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.cross_validation import train_test_split
from brew.base import Ensemble
from brew.generation.bagging import *
from brew.selection.dynamic.knora import *
N=10000
X, y = datasets.make_hastie_10_2(n_samples=N, random_state=1)
for i, yi in enumerate(set(y)):
y[y == yi] = i
Xtra, Xtst, ytra, ytst = train_test_split(X, y, test_size=0.10)
Xtra, Xval, ytra, yval = train_test_split(Xtra, ytra, test_size=0.30)
bag = Bagging(base_classifier=DecisionTreeClassifier(), n_classifiers=100)
bag.fit(Xtra, ytra)
class KNORA_UNION_VALID(KNORA):
def select(self, ensemble, x):
neighbors_X, neighbors_y = self.get_neighbors(x)
pool = []
for c in ensemble.classifiers:
for i, neighbor in enumerate(neighbors_X):
if c.predict(neighbor) == neighbors_y[i]:
pool.append(c)
break
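        # KNORA-Union weighting: each selected classifier gets a weight equal
        # to the number of the K nearest neighbors it classifies correctly.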
weights = []
for clf in pool:
msk = clf.predict(neighbors_X) == neighbors_y
weights = weights + [sum(msk)]
return Ensemble(classifiers=pool), weights
class KNORA_ELIMINATE_VALID(KNORA):
def select(self, ensemble, x):
neighbors_X, neighbors_y = self.get_neighbors(x)
k = self.K
pool = []
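        # KNORA-Eliminate: shrink the validation neighborhood until at least
        # one classifier labels every remaining neighbor correctly.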
while k > 0:
nn_X = neighbors_X[:k,:]
nn_y = neighbors_y[:k]
for i, c in enumerate(ensemble.classifiers):
if np.all(c.predict(nn_X) == nn_y[np.newaxis, :]):
pool.append(c)
if not pool: # empty
k = k-1
else:
break
if not pool: # still empty
# select the classifier that recognizes
# more samples in the whole neighborhood
# also select classifiers that recognize
# the same number of neighbors
pool = self._get_best_classifiers(ensemble, neighbors_X, neighbors_y, x)
return Ensemble(classifiers=pool), None
class TestKNORA_E():
def test_simple(self):
selector_pred = KNORA_ELIMINATE(Xval=Xval, yval=yval)
selector_true = KNORA_ELIMINATE_VALID(Xval=Xval, yval=yval)
for x in Xtst:
pool_pred, w_pred = selector_pred.select(bag.ensemble, x)
pool_true, w_true = selector_true.select(bag.ensemble, x)
assert w_pred == w_true
assert len(pool_pred) == len(pool_true)
for c_p, c_t in zip(pool_pred.classifiers, pool_true.classifiers):
assert c_p == c_t
class TestKNORA_U():
def test_simple(self):
selector_pred = KNORA_UNION(Xval=Xval, yval=yval)
selector_true = KNORA_UNION_VALID(Xval=Xval, yval=yval)
for x in Xtst:
pool_pred, w_pred = selector_pred.select(bag.ensemble, x)
pool_true, w_true = selector_true.select(bag.ensemble, x)
assert len(pool_pred) == len(pool_true)
for c_p, c_t in zip(pool_pred.classifiers, pool_true.classifiers):
assert c_p == c_t
assert len(w_pred) == len(w_true)
assert np.all(np.array(w_pred) == np.array(w_true))
| mit |
BryanCutler/spark | python/pyspark/pandas/tests/test_config.py | 1 | 6435 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import pandas as ps
from pyspark.pandas import config
from pyspark.pandas.config import Option, DictWrapper
from pyspark.pandas.testing.utils import ReusedSQLTestCase
class ConfigTest(ReusedSQLTestCase):
def setUp(self):
config._options_dict["test.config"] = Option(key="test.config", doc="", default="default")
config._options_dict["test.config.list"] = Option(
key="test.config.list", doc="", default=[], types=list
)
config._options_dict["test.config.float"] = Option(
key="test.config.float", doc="", default=1.2, types=float
)
config._options_dict["test.config.int"] = Option(
key="test.config.int",
doc="",
default=1,
types=int,
check_func=(lambda v: v > 0, "bigger then 0"),
)
config._options_dict["test.config.int.none"] = Option(
key="test.config.int", doc="", default=None, types=(int, type(None))
)
def tearDown(self):
ps.reset_option("test.config")
del config._options_dict["test.config"]
del config._options_dict["test.config.list"]
del config._options_dict["test.config.float"]
del config._options_dict["test.config.int"]
del config._options_dict["test.config.int.none"]
def test_get_set_reset_option(self):
self.assertEqual(ps.get_option("test.config"), "default")
ps.set_option("test.config", "value")
self.assertEqual(ps.get_option("test.config"), "value")
ps.reset_option("test.config")
self.assertEqual(ps.get_option("test.config"), "default")
def test_get_set_reset_option_different_types(self):
ps.set_option("test.config.list", [1, 2, 3, 4])
self.assertEqual(ps.get_option("test.config.list"), [1, 2, 3, 4])
ps.set_option("test.config.float", 5.0)
self.assertEqual(ps.get_option("test.config.float"), 5.0)
ps.set_option("test.config.int", 123)
self.assertEqual(ps.get_option("test.config.int"), 123)
self.assertEqual(ps.get_option("test.config.int.none"), None) # default None
ps.set_option("test.config.int.none", 123)
self.assertEqual(ps.get_option("test.config.int.none"), 123)
ps.set_option("test.config.int.none", None)
self.assertEqual(ps.get_option("test.config.int.none"), None)
def test_different_types(self):
with self.assertRaisesRegex(ValueError, "was <class 'int'>"):
ps.set_option("test.config.list", 1)
with self.assertRaisesRegex(ValueError, "however, expected types are"):
ps.set_option("test.config.float", "abc")
with self.assertRaisesRegex(ValueError, "[<class 'int'>]"):
ps.set_option("test.config.int", "abc")
with self.assertRaisesRegex(ValueError, "(<class 'int'>, <class 'NoneType'>)"):
ps.set_option("test.config.int.none", "abc")
def test_check_func(self):
with self.assertRaisesRegex(ValueError, "bigger then 0"):
ps.set_option("test.config.int", -1)
def test_unknown_option(self):
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.get_option("unknown")
with self.assertRaisesRegex(config.OptionError, "Available options"):
ps.set_option("unknown", "value")
with self.assertRaisesRegex(config.OptionError, "test.config"):
ps.reset_option("unknown")
def test_namespace_access(self):
try:
self.assertEqual(ps.options.compute.max_rows, ps.get_option("compute.max_rows"))
ps.options.compute.max_rows = 0
self.assertEqual(ps.options.compute.max_rows, 0)
self.assertTrue(isinstance(ps.options.compute, DictWrapper))
wrapper = ps.options.compute
self.assertEqual(wrapper.max_rows, ps.get_option("compute.max_rows"))
wrapper.max_rows = 1000
self.assertEqual(ps.options.compute.max_rows, 1000)
self.assertRaisesRegex(config.OptionError, "No such option", lambda: ps.options.compu)
self.assertRaisesRegex(
config.OptionError, "No such option", lambda: ps.options.compute.max
)
self.assertRaisesRegex(
config.OptionError, "No such option", lambda: ps.options.max_rows1
)
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.options.compute.max = 0
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.options.compute = 0
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.options.com = 0
finally:
ps.reset_option("compute.max_rows")
def test_dir_options(self):
self.assertTrue("compute.default_index_type" in dir(ps.options))
self.assertTrue("plotting.sample_ratio" in dir(ps.options))
self.assertTrue("default_index_type" in dir(ps.options.compute))
self.assertTrue("sample_ratio" not in dir(ps.options.compute))
self.assertTrue("default_index_type" not in dir(ps.options.plotting))
self.assertTrue("sample_ratio" in dir(ps.options.plotting))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_config import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
theoryno3/scikit-learn | sklearn/externals/joblib/parallel.py | 29 | 28665 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
import os
import sys
import gc
import warnings
from collections import Sized
from math import sqrt
import functools
import time
import threading
import itertools
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
###############################################################################
# CPU that works also when multiprocessing is not installed (python2.5)
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
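# Illustrative usage (not part of the library code): with the threading
# backend the pickling check can safely be skipped, e.g.
#   from math import sqrt
#   Parallel(n_jobs=2, backend='threading')(
#       delayed(sqrt, check_pickle=False)(i) for i in range(4))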
###############################################################################
class ImmediateApply(object):
""" A non-delayed apply function.
"""
def __init__(self, func, args, kwargs):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = func(*args, **kwargs)
def get(self):
return self.results
###############################################################################
class CallBack(object):
""" Callback used by parallel: it is used for progress reporting, and
to add data to be processed
"""
def __init__(self, index, parallel):
self.parallel = parallel
self.index = index
def __call__(self, out):
self.parallel.print_progress(self.index)
if self.parallel._original_iterable:
self.parallel.dispatch_next()
class LockedIterator(object):
"""Wrapper to protect a thread-unsafe iterable against concurrent access.
A Python generator is not thread-safe by default and will raise
ValueError("generator already executing") if two threads consume it
concurrently.
In joblib this could typically happen when the passed iterator is a
generator expression and pre_dispatch != 'all'. In that case a callback is
passed to the multiprocessing apply_async call and helper threads will
trigger the consumption of the source iterable in the dispatch_next
method.
"""
def __init__(self, it):
self._lock = threading.Lock()
self._it = iter(it)
def __iter__(self):
return self
def next(self):
with self._lock:
return next(self._it)
# For Python 3 compat
__next__ = next
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs : int
The number of jobs to use for the computation. If -1 all CPUs
are used. If 1 is given, no parallel computing code is used
at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend : str or None
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose : int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch : {'all', integer, or expression, as in '3*n_jobs'}
The amount of jobs to be pre-dispatched. Default is 'all',
but it may be memory consuming, for instance if each job
        involves a lot of data.
temp_folder : str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes : int, str, or None, optional, 100e6 (100MB) by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
        Use None to disable memmapping of large arrays.
Only active when backend="multiprocessing".
mmap_mode : 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
        constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
          - using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
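    # Illustrative sketch (not from joblib's own docs): when the work mostly
    # releases the GIL (e.g. large NumPy operations), the low-overhead
    # "threading" backend described above can be used instead of the default
    # process-based pool. Hypothetical usage:
    #
    #   >>> import numpy as np
    #   >>> Parallel(n_jobs=2, backend="threading")(
    #   ...     delayed(np.linalg.norm)(np.random.rand(100, 100))
    #   ...     for _ in range(4))            # doctest: +SKIP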
def __init__(self, n_jobs=1, backend=None, verbose=0, pre_dispatch='all',
temp_folder=None, max_nbytes=100e6, mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self._pool = None
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def dispatch(self, func, args, kwargs):
""" Queue the function for computing, with or without multiprocessing
"""
if self._pool is None:
job = ImmediateApply(func, args, kwargs)
index = len(self._jobs)
if not _verbosity_filter(index, self.verbose):
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(time.time() - self._start_time)
))
self._jobs.append(job)
self.n_dispatched += 1
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
try:
self._lock.acquire()
job = self._pool.apply_async(SafeFunction(func), args,
kwargs, callback=CallBack(self.n_dispatched, self))
self._jobs.append(job)
self.n_dispatched += 1
except AssertionError:
print('[Parallel] Pool seems closed')
finally:
self._lock.release()
def dispatch_next(self):
""" Dispatch more data for parallel processing
"""
self._dispatch_amount += 1
while self._dispatch_amount:
try:
# XXX: possible race condition shuffling the order of
# dispatches in the next two lines.
func, args, kwargs = next(self._original_iterable)
self.dispatch(func, args, kwargs)
self._dispatch_amount -= 1
except ValueError:
""" Race condition in accessing a generator, we skip,
the dispatch will be done later.
"""
except StopIteration:
self._iterating = False
self._original_iterable = None
return
def _print(self, msg, msg_args):
""" Display the message on stout or stderr depending on verbosity
"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self, index):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only about 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterable:
if _verbosity_filter(index, self.verbose):
return
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(elapsed_time),
))
else:
# We are finished dispatching
queue_length = self.n_dispatched
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (queue_length - index + 1
- self._pre_dispatch_amount)
frequency = (queue_length // self.verbose) + 1
is_last_item = (index + 1 == queue_length)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
queue_length,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.append(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and mp is not None:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
self._lock = threading.Lock()
# Whether or not to set an environment flag to track
# multiple process spawning
set_environ_flag = False
if (n_jobs is None or mp is None or n_jobs == 1):
n_jobs = 1
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=2)
elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork inside non-main posix threads
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Make sure to free as much memory as possible before forking
gc.collect()
# Set an environment variable to avoid infinite loops
set_environ_flag = True
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
pre_dispatch = self.pre_dispatch
if isinstance(iterable, Sized):
# We are given a sized (an object with len). No need to be lazy.
pre_dispatch = 'all'
if pre_dispatch == 'all' or n_jobs == 1:
self._original_iterable = None
self._pre_dispatch_amount = 0
else:
# The dispatch mechanism relies on multiprocessing helper threads
# to dispatch tasks from the original iterable concurrently upon
# job completions. As Python generators are not thread-safe we
# need to wrap it with a lock
iterable = LockedIterator(iterable)
self._original_iterable = iterable
self._dispatch_amount = 0
if hasattr(pre_dispatch, 'endswith'):
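                # pre_dispatch was given as a string expression such as
                # '3*n_jobs'; it is evaluated with n_jobs in scope, so e.g.
                # with n_jobs=2 the string '3*n_jobs' pre-dispatches 6 tasks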
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions
iterable = itertools.islice(iterable, pre_dispatch)
self._start_time = time.time()
self.n_dispatched = 0
try:
if set_environ_flag:
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
self._iterating = True
for function, args, kwargs in iterable:
self.dispatch(function, args, kwargs)
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above for loop.
                # No need to wait for async callbacks to trigger the
                # consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.terminate() # terminate does a join()
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
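# Hedged usage sketch (added for documentation, not part of scikit-learn):
# two of the most commonly used entry points re-exported above.
#
#   >>> from sklearn.metrics import accuracy_score, confusion_matrix
#   >>> accuracy_score([0, 1, 1, 0], [0, 1, 0, 0])
#   0.75
#   >>> confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0])
#   array([[2, 0],
#          [1, 1]])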
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_stats_spatio_temporal_cluster_sensors.py | 4 | 7427 | """
.. _stats_cluster_sensors_2samp_spatial:
=====================================================
Spatiotemporal permutation F-test on full sensor data
=====================================================
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Significant spatiotemporal clusters will then
be visualized using custom matplotlib code.
"""
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import read_ch_connectivity
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 30)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for the channel of interest
# ---------------------------------------
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id, copy=False)
condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R'
X = [epochs[k].get_data() for k in condition_names] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
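# each entry now has shape (n_observations, n_times, n_channels), which is
# the layout expected by spatio_temporal_cluster_test below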
###############################################################################
# Load FieldTrip neighbor definition to setup sensor connectivity
# ---------------------------------------------------------------
connectivity, ch_names = read_ch_connectivity('neuromag306mag')
print(type(connectivity)) # it's a sparse matrix!
plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
###############################################################################
# Compute permutation statistic
# -----------------------------
#
# How does it work? We use clustering to `bind` together features which are
# similar. Our features are the magnetic fields measured over our sensor
# array at different times. This reduces the multiple comparison problem.
# To compute the actual test-statistic, we first sum all F-values in all
# clusters. We end up with one statistic for each cluster.
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024
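# A minimal illustration of the permutation idea on a plain statistic (not
# the clustered F-test used below; the array values are made up):
#
#   >>> rng = np.random.RandomState(0)
#   >>> a, b = rng.randn(20) + 0.5, rng.randn(20)
#   >>> observed = a.mean() - b.mean()
#   >>> pooled = np.concatenate([a, b])
#   >>> null = [np.mean(p[:20]) - np.mean(p[20:])
#   ...         for p in (rng.permutation(pooled) for _ in range(1000))]
#   >>> p_value = np.mean(np.abs(null) >= abs(observed))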
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.001
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=1,
connectivity=connectivity)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
###############################################################################
# Note. The same functions work with source estimates. The only differences
# are the origin of the data, the size, and the connectivity definition.
# It can be used for single trials or for groups of subjects.
#
# Visualize clusters
# ------------------
# configure variables for visualization
times = epochs.times * 1e3
colors = 'r', 'r', 'steelblue', 'steelblue'
linestyles = '-', '--', '-', '--'
# grand average as numpy array
grand_ave = np.array(X).mean(axis=1)
# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos
# loop over significant clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at significant sensors
signals = grand_ave[..., ch_inds].mean(axis=-1)
sig_times = times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
title = 'Cluster #{0}'.format(i_clu + 1)
fig.suptitle(title, fontsize=14)
# plot average test statistic and mark significant sensors
image, _ = plot_topomap(f_map, pos, mask=mask, axes=ax_topo,
cmap='Reds', vmin=np.min, vmax=np.max)
# advanced matplotlib for showing image with figure and colorbar
# in one plot
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format(
*sig_times[[0, -1]]
))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
for signal, name, col, ls in zip(signals, condition_names, colors,
linestyles):
ax_signals.plot(times, signal, color=col, linestyle=ls, label=name)
# add information
ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset')
ax_signals.set_xlim([times[0], times[-1]])
ax_signals.set_xlabel('time [ms]')
ax_signals.set_ylabel('evoked magnetic fields [fT]')
# plot significant time range
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
ax_signals.legend(loc='lower right')
ax_signals.set_ylim(ymin, ymax)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
###############################################################################
# Exercises
# ----------
#
# - What is the smallest p-value you can obtain, given the finite number of
# permutations?
# - Use an F distribution to compute the threshold from traditional significance
# levels. Hint: take a look at ``scipy.stats.distributions.f``
| bsd-3-clause |
shigh/py3d3v | model/figs/gaussian-screen.py | 1 | 1277 |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import erf
# Generate short and long range force values
r = np.linspace(.01, 1, 100)
beta_vals = np.arange(1, 5)
sqpi = np.sqrt(np.pi)
E_vals = []
F_vals = []
for beta in beta_vals:
E = sqpi*erf(r*beta)/(2*r**2)-beta*np.exp(-beta**2*r**2)/r
E = E/(2*sqpi**3)
E_vals.append((E, beta))
F = 1/(4*np.pi*r**2) - E
F_vals.append((F, beta))
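# Note (added for clarity): by construction the screened field E and the
# short-range remainder F recombine to the bare Coulomb field, i.e.
# E + F = 1/(4*pi*r**2) for every beta; np.allclose(E + F, 1/(4*np.pi*r**2))
# can be used as a quick sanity check.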
# Plot short range forces
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1,2,1)
for E, beta in E_vals:
ax.plot(r, E, label="$\\beta=%i$"%(beta,))
ax.set_title("Field produced by Gaussian screen")
ax.set_xlabel("$r$")
ax.set_ylabel("$E(r)$")
ax.legend()
# Plot long range forces
#fig = plt.figure()
ax = fig.add_subplot(122)
for F, beta in F_vals:
ax.semilogy(r, F, label="$\\beta=%i$"%(beta,))
ax.set_title("Short range force using Gaussian screen")
ax.set_xlabel("$r$")
ax.set_ylabel("$E(r)$")
ax.legend(loc="lower left")
fig.savefig("p3m-gaussian-fields.pdf")
# CIC
x_vals = np.linspace(-1, 1, 1000)
s_vals = np.zeros_like(x_vals)
s_vals[np.abs(x_vals)<.5] = 1
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x_vals, s_vals)
ax.set_ylim((0, 1.1))
ax.set_title("CIC Particle Shape")
fig.savefig("cic.pdf")
| gpl-2.0 |
andrewcmyers/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
jmontoyam/mne-python | mne/viz/tests/test_topomap.py | 3 | 12543 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_array_equal
from nose.tools import assert_true, assert_equal
from mne import read_evokeds, read_proj
from mne.io import read_raw_fif
from mne.io.constants import FIFF
from mne.io.pick import pick_info, channel_indices_by_type
from mne.channels import read_layout, make_eeg_layout
from mne.datasets import testing
from mne.time_frequency.tfr import AverageTFR
from mne.utils import slow_test, run_tests_if_main
from mne.viz import plot_evoked_topomap, plot_projs_topomap
from mne.viz.topomap import (_check_outlines, _onselect, plot_topomap,
plot_psds_topomap)
from mne.viz.utils import _find_peaks, _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg-proj.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
layout = read_layout('Vectorview-all')
def _get_raw():
"""Get raw data."""
return read_raw_fif(raw_fname, preload=False, add_eeg_ref=False)
@slow_test
@testing.requires_testing_data
def test_plot_topomap():
"""Test topomap plotting."""
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
# evoked
warnings.simplefilter('always')
res = 16
evoked = read_evokeds(evoked_fname, 'Left Auditory',
baseline=(None, 0))
# Test animation
_, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1],
butterfly=False)
anim._func(1) # _animate has to be tested separately on 'Agg' backend.
plt.close('all')
ev_bad = evoked.copy().pick_types(meg=False, eeg=True)
ev_bad.pick_channels(ev_bad.ch_names[:2])
ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6) # auto, plots EEG
assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
assert_raises(ValueError, ev_bad.plot_topomap, times=[-100]) # bad time
assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]]) # bad time
assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]]) # bad time
evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
plt.close('all')
axes = [plt.subplot(221), plt.subplot(222)]
evoked.plot_topomap(axes=axes, colorbar=False)
plt.close('all')
evoked.plot_topomap(times=[-0.1, 0.2])
plt.close('all')
mask = np.zeros_like(evoked.data, dtype=bool)
mask[[1, 5], :] = True
evoked.plot_topomap(ch_type='mag', outlines=None)
times = [0.1]
evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
evoked.plot_topomap(times, ch_type='planar1', res=res)
evoked.plot_topomap(times, ch_type='planar2', res=res)
evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
show_names=True, mask_params={'marker': 'x'})
plt.close('all')
assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
res=res, average=-1000)
assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
res=res, average='hahahahah')
p = evoked.plot_topomap(times, ch_type='grad', res=res,
show_names=lambda x: x.replace('MEG', ''),
image_interp='bilinear')
subplot = [x for x in p.get_children() if
isinstance(x, matplotlib.axes.Subplot)][0]
assert_true(all('MEG' not in x.get_text()
for x in subplot.get_children()
if isinstance(x, matplotlib.text.Text)))
# Plot array
for ch_type in ('mag', 'grad'):
evoked_ = evoked.copy().pick_types(eeg=False, meg=ch_type)
plot_topomap(evoked_.data[:, 0], evoked_.info)
# fail with multiple channel types
assert_raises(ValueError, plot_topomap, evoked.data[0, :], evoked.info)
# Test title
def get_texts(p):
return [x.get_text() for x in p.get_children() if
isinstance(x, matplotlib.text.Text)]
p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
assert_equal(len(get_texts(p)), 0)
p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
texts = get_texts(p)
assert_equal(len(texts), 1)
assert_equal(texts[0], 'Custom')
plt.close('all')
# delaunay triangulation warning
with warnings.catch_warnings(record=True): # can't show
warnings.simplefilter('always')
evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
proj='interactive') # projs have already been applied
# change to no-proj mode
evoked = read_evokeds(evoked_fname, 'Left Auditory',
baseline=(None, 0), proj=False)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
assert_raises(RuntimeError, plot_evoked_topomap, evoked,
np.repeat(.1, 50))
assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])
with warnings.catch_warnings(record=True): # file conventions
warnings.simplefilter('always')
projs = read_proj(ecg_fname)
projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
plot_projs_topomap(projs, res=res, colorbar=True)
plt.close('all')
ax = plt.subplot(111)
plot_projs_topomap([projs[0]], res=res, axes=ax) # test axes param
plt.close('all')
for ch in evoked.info['chs']:
if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
ch['loc'].fill(0)
# Remove extra digitization point, so EEG digitization points
# correspond with the EEG electrodes
del evoked.info['dig'][85]
pos = make_eeg_layout(evoked.info).pos[:, :2]
pos, outlines = _check_outlines(pos, 'head')
assert_true('head' in outlines.keys())
assert_true('nose' in outlines.keys())
assert_true('ear_left' in outlines.keys())
assert_true('ear_right' in outlines.keys())
assert_true('autoshrink' in outlines.keys())
assert_true(outlines['autoshrink'])
assert_true('clip_radius' in outlines.keys())
assert_array_equal(outlines['clip_radius'], 0.5)
pos, outlines = _check_outlines(pos, 'skirt')
assert_true('head' in outlines.keys())
assert_true('nose' in outlines.keys())
assert_true('ear_left' in outlines.keys())
assert_true('ear_right' in outlines.keys())
assert_true('autoshrink' in outlines.keys())
assert_true(not outlines['autoshrink'])
assert_true('clip_radius' in outlines.keys())
assert_array_equal(outlines['clip_radius'], 0.625)
pos, outlines = _check_outlines(pos, 'skirt',
head_pos={'scale': [1.2, 1.2]})
assert_array_equal(outlines['clip_radius'], 0.75)
# Plot skirt
evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')
# Pass custom outlines without patch
evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
plt.close('all')
# Test interactive cmap
fig = plot_evoked_topomap(evoked, times=[0., 0.1], ch_type='eeg',
cmap=('Reds', True), title='title')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('down')
cbar = fig.get_axes()[0].CB # Fake dragging with mouse.
ax = cbar.cbar.ax
_fake_click(fig, ax, (0.1, 0.1))
_fake_click(fig, ax, (0.1, 0.2), kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
_fake_click(fig, ax, (0.1, 0.1), button=3)
_fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
plt.close('all')
# Pass custom outlines with patch callable
def patch():
return Circle((0.5, 0.4687), radius=.46,
clip_on=True, transform=plt.gca().transAxes)
outlines['patch'] = patch
plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)
# Remove digitization points. Now topomap should fail
evoked.info['dig'] = None
assert_raises(RuntimeError, plot_evoked_topomap, evoked,
times, ch_type='eeg')
plt.close('all')
# Error for missing names
n_channels = len(pos)
data = np.ones(n_channels)
assert_raises(ValueError, plot_topomap, data, pos, show_names=True)
# Test error messages for invalid pos parameter
pos_1d = np.zeros(n_channels)
pos_3d = np.zeros((n_channels, 2, 2))
assert_raises(ValueError, plot_topomap, data, pos_1d)
assert_raises(ValueError, plot_topomap, data, pos_3d)
assert_raises(ValueError, plot_topomap, data, pos[:3, :])
pos_x = pos[:, :1]
pos_xyz = np.c_[pos, np.zeros(n_channels)[:, np.newaxis]]
assert_raises(ValueError, plot_topomap, data, pos_x)
assert_raises(ValueError, plot_topomap, data, pos_xyz)
# An #channels x 4 matrix should work though. In this case (x, y, width,
# height) is assumed.
pos_xywh = np.c_[pos, np.zeros((n_channels, 2))]
plot_topomap(data, pos_xywh)
plt.close('all')
# Test peak finder
axes = [plt.subplot(131), plt.subplot(132)]
with warnings.catch_warnings(record=True): # rightmost column
evoked.plot_topomap(times='peaks', axes=axes)
plt.close('all')
evoked.data = np.zeros(evoked.data.shape)
evoked.data[50][1] = 1
assert_array_equal(_find_peaks(evoked, 10), evoked.times[1])
evoked.data[80][100] = 1
assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 100]])
evoked.data[2][95] = 2
assert_array_equal(_find_peaks(evoked, 10), evoked.times[[1, 95]])
assert_array_equal(_find_peaks(evoked, 1), evoked.times[95])
def test_plot_tfr_topomap():
"""Test plotting of TFR data."""
import matplotlib as mpl
import matplotlib.pyplot as plt
raw = _get_raw()
times = np.linspace(-0.1, 0.1, 200)
n_freqs = 3
nave = 1
rng = np.random.RandomState(42)
data = rng.randn(len(raw.ch_names), n_freqs, len(times))
tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
tfr.plot_topomap(ch_type='mag', tmin=0.05, tmax=0.150, fmin=0, fmax=10,
res=16)
eclick = mpl.backend_bases.MouseEvent('button_press_event',
plt.gcf().canvas, 0, 0, 1)
eclick.xdata = eclick.ydata = 0.1
eclick.inaxes = plt.gca()
erelease = mpl.backend_bases.MouseEvent('button_release_event',
plt.gcf().canvas, 0.9, 0.9, 1)
erelease.xdata = 0.3
erelease.ydata = 0.2
pos = [[0.11, 0.11], [0.25, 0.5], [0.0, 0.2], [0.2, 0.39]]
_onselect(eclick, erelease, tfr, pos, 'grad', 1, 3, 1, 3, 'RdBu_r', list())
_onselect(eclick, erelease, tfr, pos, 'mag', 1, 3, 1, 3, 'RdBu_r', list())
eclick.xdata = eclick.ydata = 0.
erelease.xdata = erelease.ydata = 0.9
tfr._onselect(eclick, erelease, None, 'mean', None)
plt.close('all')
# test plot_psds_topomap
info = raw.info.copy()
chan_inds = channel_indices_by_type(info)
info = pick_info(info, chan_inds['grad'][:4])
fig, axes = plt.subplots()
freqs = np.arange(3., 9.5)
bands = [(4, 8, 'Theta')]
psd = np.random.rand(len(info['ch_names']), freqs.shape[0])
plot_psds_topomap(psd, freqs, info, bands=bands, axes=[axes])
run_tests_if_main()
| bsd-3-clause |
architecture-building-systems/CEAforArcGIS | cea/plots/colors.py | 2 | 1985 | """
This is the official list of CEA colors to use in plots
"""
import os
import pandas as pd
import yaml
import warnings
import functools
from typing import List, Callable
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
COLORS_TO_RGB = {"red": "rgb(240,75,91)",
"red_light": "rgb(246,148,143)",
"red_lighter": "rgb(252,217,210)",
"blue": "rgb(63,192,194)",
"blue_light": "rgb(171,221,222)",
"blue_lighter": "rgb(225,242,242)",
"yellow": "rgb(255,209,29)",
"yellow_light": "rgb(255,225,133)",
"yellow_lighter": "rgb(255,243,211)",
"brown": "rgb(174,148,72)",
"brown_light": "rgb(201,183,135)",
"brown_lighter": "rgb(233,225,207)",
"purple": "rgb(171,95,127)",
"purple_light": "rgb(198,149,167)",
"purple_lighter": "rgb(231,214,219)",
"green": "rgb(126,199,143)",
"green_light": "rgb(178,219,183)",
"green_lighter": "rgb(227,241,228)",
"grey": "rgb(68,76,83)",
"grey_light": "rgb(126,127,132)",
"black": "rgb(35,31,32)",
"white": "rgb(255,255,255)",
"orange": "rgb(245,131,69)",
"orange_light": "rgb(248,159,109)",
"orange_lighter": "rgb(254,220,198)"}
def color_to_rgb(color):
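    """Map a CEA color name to its ``"rgb(r,g,b)"`` string.

    Strings that already look like ``rgb(...)`` are passed through unchanged;
    unknown names fall back to black. The doctest below is an illustrative
    sketch added for documentation:

    >>> color_to_rgb("red")
    'rgb(240,75,91)'
    >>> color_to_rgb("rgb(1, 2, 3)")
    'rgb(1, 2, 3)'
    >>> color_to_rgb("not-a-color")
    'rgb(35,31,32)'
    """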
try:
return COLORS_TO_RGB[color]
except KeyError:
import re
if re.match("rgb\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*\)", color):
# already an rgb formatted color
return color
return COLORS_TO_RGB["black"] | mit |
suku248/nest-simulator | pynest/nest/raster_plot.py | 15 | 9348 | # -*- coding: utf-8 -*-
#
# raster_plot.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
""" Functions for raster plotting."""
import nest
import numpy
__all__ = [
'extract_events',
'from_data',
'from_device',
'from_file',
'from_file_numpy',
'from_file_pandas'
]
def extract_events(data, time=None, sel=None):
"""Extract all events within a given time interval.
Both time and sel may be used at the same time such that all
events are extracted for which both conditions are true.
Parameters
----------
data : list
Matrix such that
data[:,0] is a vector of all node_ids and
data[:,1] a vector with the corresponding time stamps.
time : list, optional
List with at most two entries such that
        time=[t_max] extracts all events with t < t_max
time=[t_min, t_max] extracts all events with t_min <= t < t_max
sel : list, optional
List of node_ids such that
sel=[node_id1, ... , node_idn] extracts all events from these node_ids.
All others are discarded.
Returns
-------
numpy.array
List of events as (node_id, t) tuples
"""
val = []
if time:
t_max = time[-1]
if len(time) > 1:
t_min = time[0]
else:
t_min = 0
for v in data:
t = v[1]
node_id = v[0]
if time and (t < t_min or t >= t_max):
continue
if not sel or node_id in sel:
val.append(v)
return numpy.array(val)
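# Illustrative sketch (not part of NEST): how the time and sel filters act on
# a tiny, made-up event matrix.
#
#   >>> data = numpy.array([[1, 10.0], [2, 15.0], [1, 20.0], [3, 25.0]])
#   >>> extract_events(data, time=[12.0, 22.0]).tolist()   # 12 ms <= t < 22 ms
#   [[2.0, 15.0], [1.0, 20.0]]
#   >>> extract_events(data, sel=[1]).tolist()              # only node_id 1
#   [[1.0, 10.0], [1.0, 20.0]]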
def from_data(data, sel=None, **kwargs):
"""Plot raster plot from data array.
Parameters
----------
data : list
Matrix such that
data[:,0] is a vector of all node_ids and
data[:,1] a vector with the corresponding time stamps.
sel : list, optional
List of node_ids such that
sel=[node_id1, ... , node_idn] extracts all events from these node_ids.
All others are discarded.
kwargs:
Parameters passed to _make_plot
"""
if len(data) == 0:
raise nest.kernel.NESTError("No data to plot.")
ts = data[:, 1]
d = extract_events(data, sel=sel)
ts1 = d[:, 1]
node_ids = d[:, 0]
return _make_plot(ts, ts1, node_ids, data[:, 0], **kwargs)
def from_file(fname, **kwargs):
"""Plot raster from file.
Parameters
----------
fname : str or tuple(str) or list(str)
File name or list of file names
If a list of files is given, the data from them is concatenated as if
it had been stored in a single file - useful when MPI is enabled and
data is logged separately for each MPI rank, for example.
kwargs:
Parameters passed to _make_plot
"""
if isinstance(fname, str):
fname = [fname]
if isinstance(fname, (list, tuple)):
try:
global pandas
pandas = __import__('pandas')
from_file_pandas(fname, **kwargs)
except ImportError:
from_file_numpy(fname, **kwargs)
else:
print('fname should be one of str/list(str)/tuple(str).')
def from_file_pandas(fname, **kwargs):
"""Use pandas."""
data = None
for f in fname:
dataFrame = pandas.read_table(f, header=2, skipinitialspace=True)
newdata = dataFrame.values
if data is None:
data = newdata
else:
data = numpy.concatenate((data, newdata))
return from_data(data, **kwargs)
def from_file_numpy(fname, **kwargs):
"""Use numpy."""
data = None
for f in fname:
newdata = numpy.loadtxt(f, skiprows=3)
if data is None:
data = newdata
else:
data = numpy.concatenate((data, newdata))
return from_data(data, **kwargs)
def from_device(detec, **kwargs):
"""
Plot raster from a spike recorder.
Parameters
----------
    detec : spike_recorder
        The spike_recorder node to read events from
kwargs:
Parameters passed to _make_plot
Raises
------
nest.kernel.NESTError
"""
type_id = nest.GetDefaults(detec.get('model'), 'type_id')
if not type_id == "spike_recorder":
raise nest.kernel.NESTError("Please provide a spike_recorder.")
if detec.get('record_to') == "memory":
ts, node_ids = _from_memory(detec)
if not len(ts):
raise nest.kernel.NESTError("No events recorded!")
if "title" not in kwargs:
kwargs["title"] = "Raster plot from device '%i'" % detec.get('global_id')
if detec.get('time_in_steps'):
xlabel = "Steps"
else:
xlabel = "Time (ms)"
return _make_plot(ts, ts, node_ids, node_ids, xlabel=xlabel, **kwargs)
elif detec.get("record_to") == "ascii":
fname = detec.get("filenames")
return from_file(fname, **kwargs)
else:
raise nest.kernel.NESTError("No data to plot. Make sure that \
record_to is set to either 'ascii' or 'memory'.")
def _from_memory(detec):
ev = detec.get("events")
return ev["times"], ev["senders"]
def _make_plot(ts, ts1, node_ids, neurons, hist=True, hist_binwidth=5.0,
grayscale=False, title=None, xlabel=None):
"""Generic plotting routine.
Constructs a raster plot along with an optional histogram (common part in
all routines above).
Parameters
----------
ts : list
All timestamps
ts1 : list
Timestamps corresponding to node_ids
node_ids : list
Global ids corresponding to ts1
neurons : list
Node IDs of neurons to plot
hist : bool, optional
Display histogram
hist_binwidth : float, optional
Width of histogram bins
grayscale : bool, optional
Plot in grayscale
title : str, optional
Plot title
xlabel : str, optional
Label for x-axis
"""
import matplotlib.pyplot as plt
plt.figure()
if grayscale:
color_marker = ".k"
color_bar = "gray"
else:
color_marker = "."
color_bar = "blue"
color_edge = "black"
if xlabel is None:
xlabel = "Time (ms)"
ylabel = "Neuron ID"
if hist:
ax1 = plt.axes([0.1, 0.3, 0.85, 0.6])
plotid = plt.plot(ts1, node_ids, color_marker)
plt.ylabel(ylabel)
plt.xticks([])
xlim = plt.xlim()
plt.axes([0.1, 0.1, 0.85, 0.17])
t_bins = numpy.arange(
numpy.amin(ts), numpy.amax(ts),
float(hist_binwidth)
)
n, _ = _histogram(ts, bins=t_bins)
num_neurons = len(numpy.unique(neurons))
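        # convert spike counts per bin into a population rate in Hz:
        # spikes / (bin width in s * number of neurons); the factor 1000
        # converts hist_binwidth from ms to s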
heights = 1000 * n / (hist_binwidth * num_neurons)
plt.bar(t_bins, heights, width=hist_binwidth, color=color_bar,
edgecolor=color_edge)
plt.yticks([
int(x) for x in
numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)
])
plt.ylabel("Rate (Hz)")
plt.xlabel(xlabel)
plt.xlim(xlim)
plt.axes(ax1)
else:
plotid = plt.plot(ts1, node_ids, color_marker)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if title is None:
plt.title("Raster plot")
else:
plt.title(title)
plt.draw()
return plotid
def _histogram(a, bins=10, bin_range=None, normed=False):
"""Calculate histogram for data.
Parameters
----------
a : list
Data to calculate histogram for
bins : int, optional
Number of bins
    bin_range : tuple of float, optional
        Lower and upper range of the bins
normed : bool, optional
Whether distribution should be normalized
Raises
------
ValueError
"""
from numpy import asarray, iterable, linspace, sort, concatenate
a = asarray(a).ravel()
if bin_range is not None:
mn, mx = bin_range
if mn > mx:
raise ValueError("max must be larger than min in range parameter")
if not iterable(bins):
if bin_range is None:
bin_range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in bin_range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins, endpoint=False)
else:
if (bins[1:] - bins[:-1] < 0).any():
raise ValueError("bins must increase monotonically")
# best block size probably depends on processor cache size
block = 65536
n = sort(a[:block]).searchsorted(bins)
for i in range(block, a.size, block):
n += sort(a[i:i + block]).searchsorted(bins)
n = concatenate([n, [len(a)]])
n = n[1:] - n[:-1]
if normed:
db = bins[1] - bins[0]
return 1.0 / (a.size * db) * n, bins
else:
return n, bins
| gpl-2.0 |
fmfn/UnbalancedDataset | imblearn/utils/tests/test_estimator_checks.py | 2 | 3697 | import pytest
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.multiclass import check_classification_targets
from imblearn.base import BaseSampler
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.utils import check_target_type as target_check
from imblearn.utils.estimator_checks import check_target_type
from imblearn.utils.estimator_checks import check_samplers_one_label
from imblearn.utils.estimator_checks import check_samplers_fit
from imblearn.utils.estimator_checks import check_samplers_sparse
from imblearn.utils.estimator_checks import check_samplers_preserve_dtype
from imblearn.utils.estimator_checks import check_samplers_string
from imblearn.utils.estimator_checks import check_samplers_nan
class BaseBadSampler(BaseEstimator):
"""Sampler without inputs checking."""
_sampling_type = "bypass"
def fit(self, X, y):
return self
def fit_resample(self, X, y):
check_classification_targets(y)
self.fit(X, y)
return X, y
class SamplerSingleClass(BaseSampler):
"""Sampler that would sample even with a single class."""
_sampling_type = "bypass"
def fit_resample(self, X, y):
return self._fit_resample(X, y)
def _fit_resample(self, X, y):
return X, y
class NotFittedSampler(BaseBadSampler):
"""Sampler without target checking."""
def fit(self, X, y):
X, y = self._validate_data(X, y)
return self
class NoAcceptingSparseSampler(BaseBadSampler):
"""Sampler which does not accept sparse matrix."""
def fit(self, X, y):
X, y = self._validate_data(X, y)
self.sampling_strategy_ = "sampling_strategy_"
return self
class NotPreservingDtypeSampler(BaseSampler):
_sampling_type = "bypass"
def _fit_resample(self, X, y):
return X.astype(np.float64), y.astype(np.int64)
class IndicesSampler(BaseOverSampler):
def _check_X_y(self, X, y):
y, binarize_y = target_check(y, indicate_one_vs_all=True)
X, y = self._validate_data(
X,
y,
reset=True,
dtype=None,
force_all_finite=False,
)
return X, y, binarize_y
def _fit_resample(self, X, y):
n_max_count_class = np.bincount(y).max()
indices = np.random.choice(np.arange(X.shape[0]), size=n_max_count_class * 2)
return X[indices], y[indices]
def test_check_samplers_string():
sampler = IndicesSampler()
check_samplers_string(sampler.__class__.__name__, sampler)
def test_check_samplers_nan():
sampler = IndicesSampler()
check_samplers_nan(sampler.__class__.__name__, sampler)
mapping_estimator_error = {
"BaseBadSampler": (AssertionError, "ValueError not raised by fit"),
"SamplerSingleClass": (AssertionError, "Sampler can't balance when only"),
"NotFittedSampler": (AssertionError, "No fitted attribute"),
"NoAcceptingSparseSampler": (TypeError, "A sparse matrix was passed"),
"NotPreservingDtypeSampler": (AssertionError, "X dtype is not preserved"),
}
def _test_single_check(Estimator, check):
estimator = Estimator()
name = estimator.__class__.__name__
err_type, err_msg = mapping_estimator_error[name]
with pytest.raises(err_type, match=err_msg):
check(name, estimator)
def test_all_checks():
_test_single_check(BaseBadSampler, check_target_type)
_test_single_check(SamplerSingleClass, check_samplers_one_label)
_test_single_check(NotFittedSampler, check_samplers_fit)
_test_single_check(NoAcceptingSparseSampler, check_samplers_sparse)
_test_single_check(NotPreservingDtypeSampler, check_samplers_preserve_dtype)
| mit |
shangwuhencc/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
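    # `hides` wraps a method in a property that raises AttributeError when the
    # instance was built with hidden_method set to that method's name, so a
    # SubEstimator can simulate "not having" any given method.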
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
endolith/numpy | tools/refguide_check.py | 2 | 37851 | #!/usr/bin/env python3
"""
refguide_check.py [OPTIONS] [-- ARGS]
- Check for a NumPy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
- Check docstring examples
- Check example blocks in RST files
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings::
$ python refguide_check.py --doctests ma
or in RST-based documentations::
$ python refguide_check.py --rst docs
"""
import copy
import doctest
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
import docutils.core
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from docutils.parsers.rst import directives
from pkg_resources import parse_version
import sphinx
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK')
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "numpy"
PUBLIC_SUBMODULES = [
'core',
'doc.structured_arrays',
'f2py',
'linalg',
'lib',
'lib.recfunctions',
'fft',
'ma',
'polynomial',
'matrixlib',
'random',
'testing',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
# cases where NumPy docstrings import things from SciPy:
'numpy.lib.vectorize',
'numpy.random.standard_gamma',
'numpy.random.gamma',
'numpy.random.vonmises',
'numpy.random.power',
'numpy.random.zipf',
# remote / local file IO with DataSource is problematic in doctest:
'numpy.lib.DataSource',
'numpy.lib.Repository',
])
# Skip non-numpy RST files, historical release notes
# Any single-directory exact match will skip the directory and all subdirs.
# Any exact match (like 'doc/release') will scan subdirs but skip files in
# the matched directory.
# Any filename will skip that file
RST_SKIPLIST = [
'scipy-sphinx-theme',
'sphinxext',
'neps',
'changelog',
'doc/release',
'doc/source/release',
'c-info.ufunc-tutorial.rst',
'c-info.python-as-glue.rst',
'f2py.getting-started.rst',
'arrays.nditer.cython.rst',
]
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
# NOTE: should NumPy have a better match between autosummary
# listings and __all__? For now, TR isn't convinced this is a
# priority -- focus on just getting docstrings executed / correct
r'numpy\.*',
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
Parameters
----------
path: str or None
cwd: str or None
Returns
-------
str
Relative path or absolute path based on current working directory
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
"""
Finds the occurrences of function names, special directives like data
and functions and scipy constants in the docstrings of `module`. The
following patterns are searched for:
* 3 spaces followed by function name, and maybe some spaces, some
dashes, and an explanation; only function names listed in
refguide are formatted like this (mostly, there may be some false
positives)
* special directives, such as data and function
* (scipy.constants only): quoted list
The `names_dict` is updated by reference and accessible in calling method
Parameters
----------
module : ModuleType
The module, whose docstrings is to be searched
names_dict : dict
Dictionary which contains module name as key and a set of found
function names and directives as value
Returns
-------
None
"""
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""
Return a copy of the __all__ dict with irrelevant items removed.
Parameters
----------
module : ModuleType
The module whose __all__ dict has to be processed
Returns
-------
deprecated : list
List of callable and deprecated sub modules
not_deprecated : list
List of non callable or non deprecated sub modules
others : list
List of remaining types of sub modules
"""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
if not all_dict:
# Must be a pure documentation module like doc.structured_arrays
all_dict.append('__doc__')
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""
Return sets of objects from all_dict.
Will return three sets:
{in module_name.__all__},
{in REFGUIDE*},
and {missing from others}
Parameters
----------
all_dict : list
List of non deprecated sub modules for module_name
others : list
List of sub modules for module_name
names : set
Set of function names or special directives present in
docstring of module_name
module_name : ModuleType
Returns
-------
only_all : set
only_ref : set
missing : set
"""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
"""
Check if module `f` is deprecated
Parameter
---------
f : ModuleType
Returns
-------
bool
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
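# is_deprecated works by calling the object with a bogus keyword argument
# while warnings are escalated to errors: a deprecation wrapper typically
# warns before the wrapped callable runs, so the DeprecationWarning surfaces
# as an exception, and anything else (e.g. the TypeError from the bad kwarg)
# is swallowed.  Minimal sketch of the same idea with a hypothetical function:
#   >>> import warnings
#   >>> def old(**kwargs):
#   ...     warnings.warn("use new() instead", DeprecationWarning)
#   >>> with warnings.catch_warnings():
#   ...     warnings.simplefilter("error")
#   ...     try:
#   ...         old(**{"not a kwarg": None})
#   ...     except DeprecationWarning:
#   ...         print("deprecated")
#   deprecated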
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
"""
Check that `all_dict` is consistent with the `names` in `module_name`
For instance, that there are no deprecated or extra objects.
Parameters
----------
all_dict : list
names : set
deprecated : list
others : list
module_name : ModuleType
dots : bool
Whether to print a dot for each check
Returns
-------
list
List of [(name, success_flag, output)...]
"""
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
"""
Validates the doc string in a snippet of documentation
`text` from file `name`
Parameters
----------
text : str
Docstring text
name : str
File name for which the doc string is to be validated
dots : bool
Whether to print a dot symbol for each check
Returns
-------
(bool, str)
"""
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'doc', 'currentmodule', 'autosummary', 'data', 'attr',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Parameters
----------
module : ModuleType
names : set
Returns
-------
result : list
List of [(module_name, success_flag, output),...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'numpy': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,
'StringIO': io.StringIO,
}
class DTRunner(doctest.DocTestRunner):
"""
The doctest runner
"""
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
"""
Check the docstrings
"""
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
"# uninitialized", "#uninitialized"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
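# Rationale for the two sets above: `rndm_markers` flags expected output that
# is inherently non-deterministic, while `stopwords` flags matplotlib plotting
# calls whose return values (Artist reprs, memory addresses, tick lists) vary
# between runs; check_output() below treats a hit from either set as a pass.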
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = CHECK_NAMESPACE
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
# TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = (r'[\w\d_]+\(' +
', '.join([r'[\w\d_]+=(.+)']*num) +
r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""
Run modified doctests for the set of `tests`.
Parameters
----------
tests: list
full_name : str
verbose : bool
doctest_warnings : bool
Returns
-------
tuple(bool, list)
Tuple of (success, output)
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = io.StringIO(newline='')
success = True
# Redirect stderr to the stdout or output
tmp_stderr = sys.stdout if doctest_warnings else output
@contextmanager
def temp_cwd():
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
# Run tests, trying to restore global state afterward
cwd = os.getcwd()
with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \
redirect_stderr(tmp_stderr):
# try to ensure random seed is NOT reproducible
np.random.seed(None)
ns = {}
for t in tests:
# We broke the tests up into chunks to try to avoid PSEUDOCODE
# This has the unfortunate side effect of restarting the global
# namespace for each test chunk, so variables will be "lost" after
# a chunk. Chain the globals to avoid this
t.globs.update(ns)
t.filename = short_path(t.filename, cwd)
# Process our options
if any([SKIPBLOCK in ex.options for ex in t.examples]):
continue
fails, successes = runner.run(t, out=output.write, clear_globs=False)
if fails > 0:
success = False
ns = t.globs
output.seek(0)
return success, output.read()
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in docstrings of the module's public symbols.
Parameters
----------
module : ModuleType
Name of module
verbose : bool
Should the result be verbose
ns : dict
Name space of module
dots : bool
doctest_warnings : bool
Returns
-------
results : list
List of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""
Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Parameters
----------
fname : str
File name
verbose : bool
ns : dict
Name space
dots : bool
doctest_warnings : bool
Returns
-------
list
List of [(item_name, success_flag, output), ...]
Notes
-----
refguide can be signalled to skip testing code by adding
``#doctest: +SKIP`` to the end of the line. If the output varies or is
random, add ``# may vary`` or ``# random`` to the comment. For example:
>>> plt.plot(...) # doctest: +SKIP
>>> random.randint(0,10)
5 # random
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
if ns is None:
ns = CHECK_NAMESPACE
results = []
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
base_line_no = 0
for part in text.split('\n\n'):
try:
tests = parser.get_doctest(part, ns, fname, fname, base_line_no)
except ValueError as e:
if e.args[0].startswith('line '):
# fix line number since `parser.get_doctest` does not increment
# the reported line number by base_line_no in the error message
parts = e.args[0].split()
parts[1] = str(int(parts[1]) + base_line_no)
e.args = (' '.join(parts),) + e.args[1:]
raise
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts.append((part, base_line_no))
base_line_no += part.count('\n') + 2
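# The "+ 2" accounts for the two newlines consumed by the '\n\n' split above:
# a block with part.count('\n') internal line breaks spans count + 1 lines,
# plus one blank separator line before the next block starts.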
# Reassemble the good bits and doctest them:
tests = []
for good_text, line_no in good_parts:
tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no))
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def iter_included_files(base_path, verbose=0, suffixes=('.rst',)):
"""
Generator function to walk `base_path` and its subdirectories, skipping
files or directories in RST_SKIPLIST, and yield each file with a suffix in
`suffixes`
Parameters
----------
base_path : str
Base path of the directory to be processed
verbose : int
suffixes : tuple
Yields
------
path
Path of the directory and its subdirectories
"""
if os.path.exists(base_path) and os.path.isfile(base_path):
yield base_path
for dir_name, subdirs, files in os.walk(base_path, topdown=True):
if dir_name in RST_SKIPLIST:
if verbose > 0:
sys.stderr.write('skipping files in %s' % dir_name)
files = []
for p in RST_SKIPLIST:
if p in subdirs:
if verbose > 0:
sys.stderr.write('skipping %s and subdirs' % p)
subdirs.remove(p)
for f in files:
if (os.path.splitext(f)[1] in suffixes and
f not in RST_SKIPLIST):
yield os.path.join(dir_name, f)
def check_documentation(base_path, results, args, dots):
"""
Check examples in any *.rst located inside `base_path`.
Add the output to `results`.
See Also
--------
check_doctests_testfile
"""
for filename in iter_included_files(base_path, args.verbose):
if dots:
sys.stderr.write(filename + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(
filename,
(args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
# stub out a "module" which is needed when reporting the result
def scratch():
pass
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
def init_matplotlib():
"""
Check feasibility of matplotlib initialization.
"""
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
"""
Validate the docstrings of the selected set of public submodules
for errors and conformance to the docstring standards.
"""
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
help="Run also doctests on ")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--rst", nargs='?', const='doc', default=None,
help=("Run also examples from *rst files "
"discovered walking the directory(s) specified, "
"defaults to 'doc'"))
args = parser.parse_args(argv)
modules = []
names_dict = {}
if not args.module_names:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in module_names:
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
dots = True
success = True
results = []
errormsgs = []
if args.doctests or args.rst:
init_matplotlib()
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
if args.doctests or not args.rst:
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others,
module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write('\n')
sys.stderr.flush()
if args.rst:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
rst_path = os.path.relpath(os.path.join(base_dir, args.rst))
if os.path.exists(rst_path):
print('\nChecking files in %s:' % rst_path)
check_documentation(rst_path, results, args, dots)
else:
sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"')
errormsgs.append('invalid directory argument to --rst')
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
for module, mod_results in results:
success = all(x[1] for x in mod_results)
if not success:
errormsgs.append(f'failed checking {module.__name__}')
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if len(errormsgs) == 0:
print("\nOK: all checks passed!")
sys.exit(0)
else:
print('\nERROR: ', '\n '.join(errormsgs))
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
Karl-Marka/data-mining | scleroderma-prediction/Build_model_v.2.py | 1 | 3273 | print('Importing libraries')
from pandas import DataFrame, read_csv
from sklearn import linear_model
from sklearn.preprocessing import StandardScaler
import numpy as np
#positive = ['GSM489234', 'GSM489228', 'GSM489221', 'GSM489229', 'GSM489220', 'GSM489223', 'GSM489233', 'GSM489230', 'GSM489231', 'GSM489225', 'GSM489232', 'GSM489205', 'GSM489198', 'GSM489213', 'GSM489202', 'GSM489218', 'GSM489199', 'GSM489208', 'GSM489197', 'GSM489210', 'GSM489212', 'GSM489195', 'GSM489206', 'GSM489217', 'GSM489194', 'GSM489214', 'GSM489203', 'GSM489211']
normprobes = ['A_23_P414913', 'A_24_P237443', 'A_32_P168349', 'A_23_P414654', 'A_24_P192914']
train = read_csv('./datasets_large/train_nocorrelated_top60_normprobes.txt', header = 0, index_col = 0, sep = '\t')
train = train.sort_index(axis = 0)
normprobes_train = train.ix[normprobes]
normprobes_train = normprobes_train.mean(axis = 0)
train = train.subtract(normprobes_train, axis = 1)
train = train.drop(normprobes)
train = train.T
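# Preprocessing summary for the block above (the test set below is treated the
# same way): each sample is normalized by subtracting the mean signal of the
# listed normalization probes, the normalization probes themselves are then
# dropped, and the matrix is transposed so that rows are samples and columns
# are probes, ready for the StandardScaler / regression steps further down.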
#train_sc = train.ix[positive]
labels_train_sc = read_csv('./datasets_large/labels_train_sc.txt', sep = '\t', header = None)
labels_train_pah = read_csv('./datasets_large/labels_train_pah.txt', sep = '\t', header = None)
labels_train_sc = labels_train_sc.unstack().tolist()
labels_train_pah = labels_train_pah.unstack().tolist()
header_train = train.columns
index_train = train.index
test = read_csv('./datasets_large/test_nocorrelated_top60_normprobes.txt', header = 0, index_col = 0, sep = '\t')
test = test.sort_index(axis = 0)
normprobes_test = test.ix[normprobes]
normprobes_test = normprobes_test.mean(axis = 0)
test = test.subtract(normprobes_test, axis = 1)
test = test.drop(normprobes)
test = test.T
labels_test_sc = read_csv('./datasets_large/labels_test_sc.txt', sep = '\t', header = None)
labels_test_pah = read_csv('./datasets_large/labels_test_pah.txt', sep = '\t', header = None)
labels_test_sc = labels_test_sc.unstack().tolist()
labels_test_pah = labels_test_pah.unstack().tolist()
header_test = test.columns
index_test = test.index
stds = StandardScaler()
stds = stds.fit(train)
train = stds.transform(train)
train = DataFrame(data = train, columns = header_train, index = index_train)
test = stds.transform(test)
test = DataFrame(data = test, columns = header_test, index = index_test)
means = stds.mean_
std_deviations = stds.std_
means = list(means)
std_deviations = list(std_deviations)
#print(means)
#print(std_deviations)
lr1 = linear_model.LinearRegression()
lr2 = linear_model.LinearRegression()
sc = lr1.fit(train, labels_train_sc)
pah = lr2.fit(train, labels_train_pah)
intercept_sc = sc.intercept_
coefs_sc = sc.coef_
intercept_pah = pah.intercept_
coefs_pah = pah.coef_
print(intercept_sc)
print(list(coefs_sc))
#print(intercept_pah)
#print(list(coefs_pah))
predictions_train_sc = sc.predict(train)
predictions_train_pah = pah.predict(train)
predictions_test_sc = sc.predict(test)
predictions_test_pah = pah.predict(test)
MSE_sc = np.mean((predictions_test_sc - labels_test_sc)**2)
MSE_pah = np.mean((predictions_test_pah - labels_test_pah)**2)
#print('MSE on Sc:', MSE_sc)
#print('MSE on PAH:', MSE_pah)
#print(list(predictions_train_sc))
#print(list(predictions_train_pah))
#print(list(predictions_test_sc))
#print(list(predictions_test_pah)) | gpl-3.0 |
zytaw/foraminifera | src/drawing/drawAuto.py | 1 | 2095 | # coding: utf-8
import drawTools
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os, re, sys
from numpy.ma import masked_array
# WHITE = OMNIPRESENT VOID (empty cell) 0
# YELLOW = FORAMINIFERA 2
# GREEN = ALGA 1
# BLUE = SMELL (scent field) [0.;1)
cmap = mpl.colors.ListedColormap(['white','green','yellow','blue'])
grays = np.linspace(0.1,0.9,64)
bounds = [0,1,2,4,99]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
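# Sketch of how the BoundaryNorm above buckets cell values into the listed
# colors (just the mechanics of the mapping; the meaning of each bucket
# follows the legend above):
#   values in [0, 1)  -> index 0 -> 'white'
#   values in [1, 2)  -> index 1 -> 'green'
#   values in [2, 4)  -> index 2 -> 'yellow'
#   values in [4, 99) -> index 3 -> 'blue'
# e.g. norm(2.0) -> 2, which cmap renders as yellow.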
normBin = mpl.colors.Normalize(vmin=0.,vmax=0.999)
colorBin = 'binary'
simulation_steps = 0
automaton = drawTools.Grids(os.getcwd() + '/../output')
# pause = True
def onclick(event):
global pause
pause = not pause
# with open(os.getcwd()+'/../simulation/records_for_test.hrl', 'r') as file:
with open(os.getcwd()+'/../simulation/records.hrl', 'r') as file:
text = file.read()
automaton.setDim(int(re.findall('\d+', re.findall('GS,\s+[0-9]+', text)[0])[0]))
simulation_steps = int(re.findall('\d+', re.findall('SIMULATION_STEPS,\s+[0-9]+', text)[0])[0])
N = automaton.getDim()
f, axarr = plt.subplots(1)
result = automaton.Grid(0)
grid = np.array(result[0])
gridKind = masked_array(grid, grid<1.0)
gridSmell = result[1] #masked_array(grid, grid>=1.0)
# axarr.imshow(gridSmell, interpolation='nearest', cmap=colorBin)#, origin="lower")
axarr.imshow(gridKind, interpolation='nearest', cmap=cmap, norm=norm)#, origin="lower")
def update(N):
N = int(N)
result = automaton.Grid(N)
grid = np.array(result[0])
gridKind = grid
# gridKind = masked_array(grid, grid<1.0)
gridSmell = result[1] #masked_array(grid, grid>=1.0)
# axarr.imshow(gridSmell, interpolation='nearest', cmap=colorBin, origin="lower")
axarr.imshow(gridKind, interpolation='nearest', cmap=cmap, norm=norm, origin="lower")
# f.canvas.mpl_connect('button_press_event', onclick)
# f.canvas.draw()
plt.ion()
for i in range(0, simulation_steps):
print '\n', i
update(i)
# while pause:
# plt.pause(0.2)
# pause = True
plt.pause(0.15)
| mit |
simonsfoundation/inferelator_ng | inferelator_ng/bbsr_python.py | 1 | 11003 | import pandas as pd
import numpy as np
import itertools
from itertools import compress
import math
from scipy import special
import multiprocessing
from functools import partial
import os, sys
from . import utils
# Wrapper function for BBSRforOneGene that's called in BBSR
gx, gy, gpp, gwm, gns = None, None, None, None, None
def BBSRforOneGeneWrapper(ind): return BBSRforOneGene(ind, gx, gy, gpp, gwm, gns)
def BBSR(X, Y, clr_mat, nS, no_pr_val, weights_mat, prior_mat, kvs, rank, ownCheck):
G = Y.shape[0] # number of genes
genes = Y.index.values.tolist()
K = X.shape[0] # max number of possible predictors (number of TFs)
tfs = X.index.values.tolist()
# Scale and permute design and response matrix
X = ((X.transpose() - X.transpose().mean()) / X.transpose().std(ddof=1)).transpose()
Y = ((Y.transpose() - Y.transpose().mean()) / Y.transpose().std(ddof=1)).transpose()
weights_mat = weights_mat.loc[genes,tfs]
clr_mat = clr_mat.loc[genes, tfs]
prior_mat = prior_mat.loc[genes, tfs]
# keep all predictors that we have priors for
pp = pd.DataFrame(((prior_mat.ix[:,:] != 0)|(weights_mat.ix[:,:]!=no_pr_val)) & ~pd.isnull(clr_mat))
mask = clr_mat == 0
# for each gene, add the top nS predictors of the list to possible predictors
clr_mat[mask] = np.nan
for ind in range(0,G):
clr_na = len(np.argwhere(np.isnan(clr_mat.ix[ind,])).flatten().tolist())
clr_w_na = np.argsort(clr_mat.ix[ind,].tolist())
if clr_na>0:
clr_order = clr_w_na[:-clr_na][::-1]
else:
clr_order = clr_w_na[:][::-1]
pp.ix[ind, clr_order[0:min(K, nS, len(clr_order))]] = True
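# At this point pp[i, j] is True when TF j is a candidate predictor for gene
# i: either a prior / non-default weight exists for the pair, or TF j is among
# the top nS CLR scores for that gene (capped at K predictors).  The block
# below additionally forbids self-interactions for genes that are themselves
# TFs by clearing the corresponding diagonal entries.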
preds = np.intersect1d(genes, tfs)
subset = pp.ix[preds,preds].values
np.fill_diagonal(subset,False)
pp=pp.set_value(preds, preds, subset)
out_list=[]
global gx, gy, gpp, gwm, gns
gx, gy, gpp, gwm, gns = X, Y, pp, weights_mat, nS
# Here we illustrate splitting a simple loop, but the same approach
# would work with any iterative control structure, as long as it is
# deterministic.
s = []
limit = G
for j in range(limit):
if next(ownCheck):
s.append(BBSRforOneGeneWrapper(j))
# Report partial result.
kvs.put('plist',(rank,s))
# One participant gathers the partial results and generates the final
# output.
if 0 == rank:
s=[]
workers=int(os.environ['SLURM_NTASKS'])
for p in range(workers):
wrank,ps = kvs.get('plist')
s.extend(ps)
print ('final s', len(s))
utils.kvsTearDown(kvs, rank)
return s
else:
return None
def BBSRforOneGene(ind, X, Y, pp, weights_mat, nS):
if ind % 100 == 0:
print('Progress: computing BBSR for gene {}'.format(ind))
pp_i = pp.ix[ind,].values # converted to numpy array
pp_i_index = [l for l, j in enumerate(pp_i) if j]
if sum(pp_i) == 0:
return dict(ind=ind,pp=np.repeat(True, len(pp_i)).tolist(),betas=0, betas_resc=0)
# create BestSubsetRegression input
y = Y.ix[ind,:][:, np.newaxis]
x = X.ix[pp_i_index,:].transpose().values # converted to numpy array
g = np.matrix(weights_mat.ix[ind,pp_i_index],dtype=np.float)
# experimental stuff
spp = ReduceNumberOfPredictors(y, x, g, nS)
#check again
pp_i[pp_i==True] = spp # this could cause issues if they aren't the same length
pp_i_index = [l for l, j in enumerate(pp_i) if j]
x = X.ix[pp_i_index,:].transpose().values # converted to numpy array
g = np.matrix(weights_mat.ix[ind,pp_i_index],dtype=np.float)
betas = BestSubsetRegression(y, x, g)
betas_resc = PredictErrorReduction(y, x, betas)
return (dict(ind=ind, pp=pp_i, betas=betas, betas_resc=betas_resc))
def ReduceNumberOfPredictors(y, x, g, n):
K = x.shape[1] # number of candidate predictors (columns of x)
spp = None
if K <= n:
spp = np.repeat(True, K).tolist()
return spp
combos = np.hstack((np.diag(np.repeat(True,K)),CombCols(K)))
bics = ExpBICforAllCombos(y, x, g, combos)
bics_sum = np.sum(np.multiply(combos.transpose(),bics[:, np.newaxis]).transpose(),1)
bics_sum = list(bics_sum)
ret = np.repeat(False, K)
ret[np.argsort(bics_sum)[0:n]] = True
return ret
def BestSubsetRegression(y, x, g):
# Do best subset regression by using all possible combinations of columns of
# x as predictors of y. Model selection criterion is BIC using results of
# Bayesian regression with Zellner's g-prior.
# Args:
# y: dependent variable
# x: independent variable
# g: value for Zellner's g-prior; can be single value or vector
# Returns:
# Beta vector of the best model
K = x.shape[1]
N = x.shape[0]
ret = []
combos = AllCombinations(K)
bics = ExpBICforAllCombos(y, x, g, combos)
not_done = True
while not_done:
best = np.argmin(bics)
betas = np.repeat(0.0,K)
if best > 0:
lst_combos_bool=combos[:, best]
lst_true_index = [i for i, j in enumerate(lst_combos_bool) if j]
x_tmp = x[:,lst_true_index]
bhat = np.linalg.solve(np.dot(x_tmp.transpose(),x_tmp),np.dot(x_tmp.transpose(),y))
for m in range(len(lst_true_index)):
ind_t=lst_true_index[m]
betas[ind_t]=bhat[m]
not_done = False
else:
not_done = False
return betas
def AllCombinations(k):
# Create a boolean matrix with all possible combinations of 1:k. Output has k rows and 2^k columns where each column is one combination.
# Note that the first column is all FALSE and corresponds to the null model.
if k < 1:
raise ValueError("No combinations for k < 1")
lst = map(list, itertools.product([False, True], repeat=k))
out=np.array([i for i in lst]).transpose()
return out
# Get all possible pairs of K predictors
def CombCols(K):
num_pair = K*(K-1)//2  # integer division so the shape passed to np.full stays an int
a = np.full((num_pair,K), False, dtype=bool)
b = list(list(tup) for tup in itertools.combinations(range(K), 2))
for i in range(len(b)):
a[i,b[i]]=True
c = a.transpose()
return c
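# Hand-worked illustration of the two combination helpers above (for k = 2
# and K = 3):
#   AllCombinations(2) -> 2 x 4 boolean array whose columns are the models
#                         [F F], [F T], [T F], [T T]
#   CombCols(3)        -> 3 x 3 boolean array whose columns are the pairs
#                         [T T F], [T F T], [F T T]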
def ExpBICforAllCombos(y, x, g, combos):
# For a list of combinations of predictors do Bayesian linear regression, more specifically calculate the parametrization of the inverse gamma
# distribution that underlies sigma squared using Zellner's g-prior method.
# Parameter g can be a vector. The expected value of the log of sigma squared is used to compute expected values of BIC.
# Returns list of expected BIC values, one for each model.
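# Sketch of the algebra implemented below, per candidate model with k
# predictors (the null model simply gets N * log(var(y))):
#   shape = N / 2
#   rate  = (SSR + bhat' * (X'X scaled elementwise by the outer product of
#            sqrt(1/(g+1)) with itself) * bhat) / 2
#   E[log sigma^2] = log(rate) - digamma(shape)
#   E[BIC] = N * E[log sigma^2] + k * log(N)
# where SSR is the sum of squared residuals; models with a singular X'X
# sub-matrix are assigned an infinite BIC.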
K = x.shape[1]
N = x.shape[0]
C = combos.shape[1]
bics = np.array(np.repeat(0,C),dtype=np.float)
# is the first combination the null model?
first_combo = 0
if sum(combos[:,0]) == 0:
bics[0] = N * math.log(np.var(y,ddof=1))
first_combo = 1
# shape parameter for the inverse gamma sigma squared would be drawn from
shape = N / 2
# compute digamma of shape here, so we can re-use it later
dig_shape = special.digamma(shape)
#### pre-compute the dot products that we will need to solve for beta
xtx = np.dot(x.transpose(),x)
xty = np.dot(x.transpose(),y)
# In Zellner's formulation there is a factor in the calculation of the rate parameter: 1 / (g + 1)
# Here we replace the factor with the appropriate matrix since g is a vector now.
var_mult = np.array(np.repeat(np.sqrt(1 / (g + 1)), K,axis=0)).transpose()
var_mult = np.multiply(var_mult,var_mult.transpose())
for i in range(first_combo, C):
comb = combos[:, i]
comb=np.where(comb)[0]
x_tmp = x[:,comb]
k = len(comb)
xtx_tmp=xtx[:,comb][comb,:]
# if the xtx_tmp matrix is singular, set bic to infinity
if np.linalg.matrix_rank(xtx_tmp, tol=1e-10) == xtx_tmp.shape[1]:
var_mult_tmp=var_mult[:,comb][comb,:]
#faster than calling lm
bhat = np.linalg.solve(xtx_tmp,xty[comb])
ssr = np.sum(np.power(np.subtract(y,np.dot(x_tmp, bhat)),2)) # sum of squares of residuals
# rate parameter for the inverse gamma that sigma squared would be drawn from; our prior guess for the regression vector beta is all zeros (sparse models)
rate = (ssr + np.dot((0 - bhat.transpose()) , np.dot(np.multiply(xtx_tmp, var_mult_tmp) ,(0 - bhat.transpose()).transpose()))) / 2
# the expected value of the log of sigma squared based on the parametrization of the inverse gamma by rate and shape
exp_log_sigma2 = math.log(rate) - dig_shape
# expected value of BIC
bics[i] = N * exp_log_sigma2 + k * math.log(N)
# set bic to infinity if lin alg error
else:
bics[i] = np.inf
return(bics)
def PredictErrorReduction(y, x, beta):
# Calculates the error reduction (measured by variance of residuals) of each
# predictor - compare full model to model without that predictor
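# Concretely, for each predictor i with a nonzero beta the value reported is
#   err_red[i] = 1 - var(residuals_full) / var(residuals_without_i)
# so err_red[i] approaches 1 when dropping predictor i inflates the residual
# variance a lot and approaches 0 when it barely matters.  (With a single
# predictor, the full model is compared against var(y) instead.)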
N = x.shape[0]
K = x.shape[1]
pred = [True if item!=0 else False for item in beta]
pred_index = [l for l, j in enumerate(pred) if j]
P = sum(pred)
# compute sigma^2 for full model
residuals = np.subtract(y,np.dot(x,beta)[:, np.newaxis])
sigma_sq_full = np.var(residuals,ddof=1)
# this will be the output
err_red = np.repeat(0.0,K)
# special case if there is only one predictor
if P == 1:
err_red[pred_index] = 1 - (sigma_sq_full/np.var(y,ddof=1))
# one by one leave out each predictor and re-compute the model with the remaining ones
for i in pred_index[0:K]:
pred_tmp = pred[:]
pred_tmp[i] = False
pred_tmp_index= [l for l, j in enumerate(pred_tmp) if j]
x_tmp = x[:,pred_tmp_index]
bhat = np.linalg.solve(np.dot(x_tmp.transpose(),x_tmp),np.dot(x_tmp.transpose(),y))
residuals = np.subtract(y,np.dot(x_tmp,bhat))
sigma_sq = np.var(residuals,ddof=1)
err_red[i] = 1 - (sigma_sq_full / sigma_sq)
return err_red
class BBSR_runner:
def run(self, X, Y, clr, prior_mat, kvs=None, rank=0, ownCheck=None):
n = 10
no_prior_weight = 1
prior_weight = 1 # prior weight has to be larger than 1 to have an effect
weights_mat = prior_mat * 0 + no_prior_weight
weights_mat = weights_mat.mask(prior_mat != 0, other=prior_weight)
run_result = BBSR(X, Y, clr, n, no_prior_weight, weights_mat, prior_mat, kvs, rank, ownCheck)
if rank:
return (None,None)
bs_betas = pd.DataFrame(np.zeros((Y.shape[0],prior_mat.shape[1])),index=Y.index,columns=prior_mat.columns)
bs_betas_resc = bs_betas.copy(deep=True)
for res in run_result:
bs_betas.ix[res['ind'],X.index.values[res['pp']]] = res['betas']
bs_betas_resc.ix[res['ind'],X.index.values[res['pp']]] = res['betas_resc']
return (bs_betas, bs_betas_resc)
| bsd-2-clause |
ddboline/kaggle_facebook_recruiting_human_or_bot | plot_data.py | 2 | 2041 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 28 23:15:29 2015
@author: ddboline
"""
import os
import matplotlib
matplotlib.use('Agg')
import pylab as pl
from pandas.tools.plotting import scatter_matrix
def create_html_page_of_plots(list_of_plots, prefix='html'):
"""
create html page with png files
"""
if not os.path.exists(prefix):
os.makedirs(prefix)
os.system('mv *.png %s' % prefix)
#print(list_of_plots)
idx = 0
htmlfile = open('%s/index_0.html' % prefix, 'w')
htmlfile.write('<!DOCTYPE html><html><body><div>\n')
for plot in list_of_plots:
if idx > 0 and idx % 200 == 0:
htmlfile.write('</div></html></html>\n')
htmlfile.close()
htmlfile = open('%s/index_%d.html' % (prefix, (idx//200)), 'w')
htmlfile.write('<!DOCTYPE html><html><body><div>\n')
htmlfile.write('<p><img src="%s"></p>\n' % plot)
idx += 1
htmlfile.write('</div></html></html>\n')
htmlfile.close()
def plot_data(indf, prefix='html'):
"""
create scatter matrix plot, histograms
"""
list_of_plots = []
column_groups = []
for idx in range(0, len(indf.columns), 3):
print len(indf.columns), idx, (idx+3)
column_groups.append(indf.columns[idx:(idx+3)])
for idx in range(len(column_groups)):
for idy in range(0, idx):
if idx == idy:
continue
print column_groups[idx]+column_groups[idy]
pl.clf()
scatter_matrix(indf[column_groups[idx]+column_groups[idy]])
pl.savefig('scatter_matrix_%d_%d.png' % (idx, idy))
list_of_plots.append('scatter_matrix_%d_%d.png' % (idx, idy))
pl.close()
for col in indf:
pl.clf()
print col
indf[col].hist(histtype='step', normed=True)
pl.title(col)
pl.savefig('%s_hist.png' % col)
list_of_plots.append('%s_hist.png' % col)
create_html_page_of_plots(list_of_plots, prefix)
return
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/pylab.py | 10 | 10782 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
MATLAB |reg| [*]_ analogs and similar arguments.
.. |reg| unicode:: 0xAE
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
autoscale - turn axis autoscaling on or off, and apply it
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
violinplot - make a violin plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a graphics property
grid - set whether gridding is on
hist - make a histogram
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imsave - save array as an image file
imshow - plot image data
legend - make an axes legend
locator_params - adjust parameters used in locating axis ticks
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
margins - set margins used in autoscaling
pause - pause for a specified interval
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make one subplot (numrows, numcols, axesnum)
subplots - make a figure with a set of (numrows, numcols) subplots
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
tick_params - control the appearance of ticks and tick labels
ticklabel_format - control the format of tick labels
title - add a title to the current axes
tricontour - make a contour plot on a triangular grid
tricontourf - make a filled contour plot on a triangular grid
tripcolor - make a pseudocolor plot on a triangular grid
triplot - plot a triangular grid
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
amax - the maximum along dimension m
amin - the minimum along dimension m
corrcoef - correlation coefficient
cov - covariance matrix
mean - the mean along dimension m
median - the median along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
ksdensity - the kernel density estimate
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - Deprecated--please use loadtxt.
loadtxt - load ASCII data into array.
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - Deprecated--please use savetxt.
savetxt - save an array to an ASCII file.
trapz - trapezoidal integration
__end
.. [*] MATLAB is a registered trademark of The MathWorks, Inc.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys, warnings
from matplotlib.cbook import (
flatten, is_string_like, exception_to_str, silent_list, iterable, dedent)
import matplotlib as mpl
# make mpl.finance module available for backwards compatibility, in case folks
# using pylab interface depended on not having to import it
with warnings.catch_warnings():
warnings.simplefilter("ignore") # deprecation: moved to a toolkit
import matplotlib.finance
from matplotlib.dates import (
date2num, num2date, datestr2num, strpdate2num, drange, epoch2num,
num2epoch, mx2num, DateFormatter, IndexDateFormatter, DateLocator,
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator, DayLocator,
HourLocator, MinuteLocator, SecondLocator, rrule, MO, TU, WE, TH, FR,
SA, SU, YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY,
relativedelta)
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
## We are still importing too many things from mlab; more cleanup is needed.
from matplotlib.mlab import (
amap, base_repr, binary_repr, bivariate_normal, center_matrix, csv2rec,
demean, detrend, detrend_linear, detrend_mean, detrend_none, dist,
dist_point_to_segment, distances_along_curve, entropy, exp_safe,
fftsurr, find, frange, get_sparse_matrix, get_xyz_where, griddata,
identity, inside_poly, is_closed_polygon, ispower2, isvector, l1norm,
l2norm, log2, longest_contiguous_ones, longest_ones, movavg, norm_flat,
normpdf, path_length, poly_below, poly_between, prctile, prctile_rank,
rec2csv, rec_append_fields, rec_drop_fields, rec_join, rk4, rms_flat,
segments_intersect, slopes, stineman_interp, vector_lengths,
window_hanning, window_none)
from matplotlib import cbook, mlab, pyplot as plt
from matplotlib.pyplot import *
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
import numpy as np
import numpy.ma as ma
# don't let numpy's datetime hide stdlib
import datetime
# This is needed, or bytes will be numpy.random.bytes from
# "from numpy.random import *" above
bytes = six.moves.builtins.bytes
| apache-2.0 |
ASinanSaglam/Ramaplot | AmberForceField.py | 1 | 38302 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ramaplot.AmberForceField.py
#
# Copyright (C) 2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Reads and represents AMBER-format force fields
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
import re
from .ForceField import ForceField
################################### CLASSES ###################################
class AmberForceField(ForceField):
"""
Reads and represents AMBER-format force fields
"""
@staticmethod
def get_cache_key(parm=None, *args, **kwargs):
"""
Generates tuple of arguments to be used as key for dataset
cache.
"""
from os.path import expandvars
return (AmberForceField, expandvars(parm))
@staticmethod
def get_cache_message(cache_key):
return "previously loaded from '{0}'".format(cache_key[1])
par_re = dict(
blank = "^\s*$",
mass = "^(?P<type>{t}){w}"
"(?P<mass>{f})"
"(?P<polarizability>{w}{f}|{w})"
"(?P<note>.*$)",
atomlist = "^({t}{w})*$",
bond = "^(?P<type_1>{t})-"
"(?P<type_2>{t}){w}"
"(?P<force_constant>{f}){w}"
"(?P<length>{f}){w}"
"(?P<note>.*$)",
angle = "^(?P<type_1>{t})-"
"(?P<type_2>{t})-"
"(?P<type_3>{t}){w}"
"(?P<force_constant>{f}){w}"
"(?P<angle>{f}){w}"
"(?P<note>.*$)",
dihedral = "^(?P<type_1>{t})-"
"(?P<type_2>{t})-"
"(?P<type_3>{t})-"
"(?P<type_4>{t}){w}"
"(?P<divider>{i}){w}"
"(?P<barrier>{sf}){w}"
"(?P<phase>{sf}){w}"
"(?P<periodicity>{sf}){w}"
"(?P<note>.*$)",
improper = "^(?P<type_1>{t})-"
"(?P<type_2>{t})-"
"(?P<type_3>{t})-"
"(?P<type_4>{t}){w}"
"(?P<barrier>{sf}){w}"
"(?P<phase>{sf}){w}"
"(?P<periodicity>{sf}){w}"
"(?P<note>.*$)",
hbond = "^{w}(?P<type_1>{t}){w}"
"(?P<type_2>{t}){w}"
"(?P<A>{f}){w}"
"(?P<B>{f}){w}"
"(?P<ASOLN>{f}){w}"
"(?P<note>.*$)",
vdw_format = "^.+{w}(?P<vdw_format>SK|RE|AC).*$",
vdw = "^{w}(?P<type>{t}){w}"
"(?P<radius>{f}){w}"
"(?P<well_depth>{f}){w}"
"(?P<note>.*$)",
ljedit_title = "^LJEDIT$",
ljedit = "^{w}(?P<type_1>{t}){w}"
"(?P<type_2>{t}){w}"
"(?P<radius_1>{f}){w}"
"(?P<well_depth_1>{f}){w}"
"(?P<radius_2>{f}){w}"
"(?P<well_depth_2>{f}){w}"
"(?P<note>.*$)",
end = "^END$")
lib_re = dict(
blank = "^\s*$",
atoms = "^\s*\"(?P<name>{a})\"{w}"
"\"(?P<type>{t})\"{w}"
"(?P<type_index>{i}){w}"
"(?P<residue_index>{i}){w}"
"(?P<flags>{i}){w}"
"(?P<atom_index>{i}){w}"
"(?P<element>{i}){w}"
"(?P<charge>{sf}){w}"
"(?P<note>.*$)",
atom_edits = "^\s*\"(?P<name>{a})\"{w}"
"\"(?P<type>{t})\"{w}"
"(?P<type_index>{i}){w}"
"(?P<element>{si}{w})"
"(?P<charge>{sf}{w})"
"(?P<note>.*$)",
box = "^\s*(?P<box>{sf}){w}"
"(?P<note>.*$)",
res_seq = "^\s*(?P<childsequence>{i}){w}"
"(?P<note>.*$)",
res_connect = "^\s*(?P<connect>{i}){w}"
"(?P<note>.*$)",
bonds = "^\s*(?P<atom_index_1>{i}){w}"
"(?P<atom_index_2>{t}){w}"
"(?P<flag>{i}){w}"
"(?P<note>.*$)",
hierarchy = "^\s*\"(?P<above_type>U|R|A)\"{w}"
"(?P<above_index>{i}){w}"
"\"(?P<below_type>U|R|A)\"{w}"
"(?P<below_index>{i}){w}"
"(?P<note>.*$)",
name = "^\s*\"(?P<name>{r})\""
"(?P<note>.*$)",
coordinates = "^\s*(?P<x>{sfe}){w}"
"(?P<y>{sfe}){w}"
"(?P<z>{sfe}){w}"
"(?P<note>.*$)",
res_connect2 = "^\s*(?P<atom_index_1>{i}){w}"
"(?P<atom_index_2>{i}){w}"
"(?P<atom_index_3>{i}){w}"
"(?P<atom_index_4>{i}){w}"
"(?P<atom_index_5>{i}){w}"
"(?P<atom_index_6>{i}){w}"
"(?P<note>.*$)",
residues = "^\s*\"(?P<name>{r})\"{w}"
"(?P<residue_index>{i}){w}"
"(?P<child_atom_index>{i}){w}"
"(?P<start_atom_index>{i}){w}"
"\"(?P<residue_type>p|n|w|\?)\"{w}"
"(?P<note>.*$)",
pdb_seq = "^\s*(?P<residue_index>{i}){w}"
"(?P<note>.*$)",
solventcap = "^\s*(?P<solventcap>{sf}){w}"
"(?P<note>.*$)",
velocities = "^\s*(?P<x>{sfe}){w}"
"(?P<y>{sfe}){w}"
"(?P<z>{sfe}){w}"
"(?P<note>.*$)")
def __init__(self, parm=None, **kwargs):
"""
"""
if parm is not None:
self.parameters = self.read_parm(parm, **kwargs)
@staticmethod
def amber_regex(regex, title=False):
"""
Prepares regex for matching AMBER fields
Arguments:
regex (string): regular expression
Returns:
(string): regular expression
"""
if title:
regex = "^!entry\.(?P<residue_name>{r})\.unit\." + regex + "{w}.*$"
return re.compile(regex.format(
r = "[\w][\w][\w][\w]?", # Residue
a = "[\w][\w]?[\w]?[\w]?", # Atom name
t = "[\w][\w \*]?", # Atom type
i = "\d+", # Integer
si = "[-]?\d+", # Signed Integer
f = "\d+\.?\d*?", # Float
sf = "[-]?\d+\.?\d*?", # Signed float
sfe = "[-]?\d+\.?\d*?[E]?[-]?\d*?", # Signed float in E notation
w = "\s+")) # Whitespace
@staticmethod
def strip_dict(dictionary):
"""
Strips each string value in a dict, deleting keys whose stripped value is empty
Arguments:
dictionary (dict): dictionary to strip
Returns:
(dict): dictionary with each element stripped
"""
# iterate over a copy so keys can be deleted during the loop (required on Python 3)
for key, value in list(dictionary.items()):
value = value.strip()
if value == "":
del dictionary[key]
else:
dictionary[key] = value
return dictionary
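# For example (illustrative, not from the original source):
#     strip_dict({"type": " CT ", "mass": "12.01", "note": "   "})
# returns {"type": "CT", "mass": "12.01"}; the all-whitespace "note" value is dropped.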
@staticmethod
def read_parm(infile, verbose=1, debug=0, **kwargs):
"""
Reads a parm file
Arguments:
infile (str): Path to input parm file
verbose (int): Enable verbose output
debug (int): Enable debug output
kwargs (dict): Additional keyword arguments
"""
import pandas as pd
if verbose >= 1:
print("READING PARM: {0}".format(infile))
strip_dict = AmberForceField.strip_dict
amber_regex = AmberForceField.amber_regex
par_re = AmberForceField.par_re
re_blank = amber_regex(par_re["blank"])
re_mass = amber_regex(par_re["mass"])
re_atomlist = amber_regex(par_re["atomlist"])
re_bond = amber_regex(par_re["bond"])
re_angle = amber_regex(par_re["angle"])
re_dihedral = amber_regex(par_re["dihedral"])
re_improper = amber_regex(par_re["improper"])
re_hbond = amber_regex(par_re["hbond"])
re_vdw_format = amber_regex(par_re["vdw_format"])
re_vdw = amber_regex(par_re["vdw"])
re_ljedit_title = amber_regex(par_re["ljedit_title"])
re_ljedit = amber_regex(par_re["ljedit"])
re_end = amber_regex(par_re["end"])
mass_types = pd.DataFrame(columns=["type", "mass",
"polarizability", "note"])
hydrophobic_types = pd.DataFrame(columns=["type"])
bonds = pd.DataFrame(columns=["type_1", "type_2",
"force_constant", "length", "note"])
angles = pd.DataFrame(columns=["type_1", "type_2", "type_3",
"force_constant", "angle", "note"])
dihedrals = pd.DataFrame(columns=["type_1", "type_2", "type_3",
"type_4", "divider", "barrier", "phase",
"periodicity", "note"])
impropers = pd.DataFrame(columns=["type_1", "type_2", "type_3",
"type_4", "barrier", "phase",
"periodicity", "note"])
hbonds = pd.DataFrame(columns=["type_1", "type_2", "A", "B",
"ASOLN"])
vdw_eq_types = pd.DataFrame()
vdw_types = pd.DataFrame(columns= ["type", "radius",
"well_depth", "note"])
ljedits = pd.DataFrame(columns= ["type_1", "type_2",
"radius_1", "well_depth_1", "radius_2",
"well_depth_2"])
section = 1
with open(infile, "r") as open_infile:
line = open_infile.readline()
while line:
# BLANK
if re.match(re_blank, line):
if verbose >= 1:
print("BLANK |{0}".format(line.strip()))
# 1: TITLE
elif section <= 1 and not re.match(re_mass, line):
if verbose >= 1:
print("TITLE |{0}".format(line.strip()))
# 2: MASS
elif section <= 2 and re.match(re_mass, line):
section = 2
if verbose >= 1:
print("MASS |{0}".format(line.strip()))
fields = strip_dict(re.match(re_mass, line).groupdict())
mass_types = mass_types.append(fields, ignore_index=True)
# 3: HYDROPHOBIC (list of types)
elif section <= 3 and re.match(re_atomlist, line):
section = 3
if verbose >= 1:
print("HYDROPHOBIC |{0}".format(line.rstrip()))
fields = [{"type": v} for v in
amber_regex("{t}").findall(line)]
hydrophobic_types = hydrophobic_types.append(fields,
ignore_index=True)
# 4: BOND
elif section <= 4 and re.match(re_bond, line):
section = 4
if verbose >= 1:
print("BOND |{0}".format(line.rstrip()))
fields = strip_dict(re.match(re_bond, line).groupdict())
bonds = bonds.append(fields, ignore_index=True)
# 5: ANGLE
elif section <= 5 and re.match(re_angle, line):
section = 5
if verbose >= 1:
print("ANGLE |{0}".format(line.rstrip()))
fields = strip_dict(re.match(re_angle, line).groupdict())
angles = angles.append(fields, ignore_index=True)
# 6: DIHEDRAL
elif section <= 6 and re.match(re_dihedral, line):
section = 6
if verbose >= 1:
print("DIHEDRAL |{0}".format(line.rstrip()))
fields = strip_dict(re.match(re_dihedral,
line).groupdict())
dihedrals = dihedrals.append(fields, ignore_index=True)
# 7: IMPROPER
elif section <= 7 and re.match(re_improper, line):
section = 7
if verbose >= 1:
print("IMPROPER |{0}".format(line.rstrip()))
fields = strip_dict(re.match(re_improper,
line).groupdict())
impropers = impropers.append(fields, ignore_index=True)
# 8: HBOND
elif section <= 8 and re.match(re_hbond, line):
section = 8
if verbose >= 1:
print("HBOND |{0}".format(line.rstrip()))
fields = strip_dict(re.match(re_hbond, line).groupdict())
hbonds = hbonds.append(fields, ignore_index=True)
# 9: VDW (equivalent types)
elif section <= 9 and re.match(re_atomlist, line):
section = 9
if verbose >= 1:
print("VDW EQUIVALENT |{0}".format(line.rstrip()))
fields = [{"type_{0}".format(i): v for i, v in
enumerate(re.compile(amber_regex("{t}")).findall(line))}]
vdw_eq_types = vdw_eq_types.append(fields,
ignore_index=True)
# 10: VDW (format)
elif section <= 10.3 and re.match(re_vdw_format, line):
if verbose >= 1:
print("VDW FORMAT |{0}".format(line.rstrip()))
# 10.2: VDW (radius and well depth)
elif section <= 10.2 and re.match(re_vdw, line):
section = 10.2
if verbose >= 1:
print("VDW |{0}".format(line.rstrip()))
fields = strip_dict(re.match(re_vdw, line).groupdict())
vdw_types = vdw_types.append(fields, ignore_index=True)
# 11: LJEDIT (title)
elif (section <= 11 and re.match(re_ljedit_title, line)):
section = 11
if verbose >= 1:
print("LJEDIT |{0}".format(line.rstrip()))
# 11.1: LJEDIT (atom types, radii, and well depth)
elif (section <= 11.1 and re.match(re_ljedit, line)):
section = 11.1
if verbose >= 1:
print("LJEDIT |{0}".format(line.rstrip()))
fields = strip_dict(re.match(re_ljedit, line).groupdict())
ljedits = ljedits.append(fields, ignore_index=True)
# END
elif re.match(re_end, line):
if verbose >= 1:
print("END |{0}".format(line.rstrip()))
break
# NO MATCH
else:
if verbose >= 1:
print("NOMATCH |{0}".format(line.rstrip()))
line = open_infile.readline()
if debug >= 1:
print(mass_types)
print(hydrophobic_types)
print(bonds)
print(angles)
print(dihedrals)
print(impropers)
print(hbonds)
print(vdw_eq_types)
print(vdw_types)
print(ljedits)
parameters = dict(
mass_types = mass_types,
hydrophobic_types = hydrophobic_types,
bonds = bonds,
angles = angles,
dihedrals = dihedrals,
impropers = impropers,
hbonds = hbonds,
vdw_eq_types = vdw_eq_types,
vdw_types = vdw_types,
ljedits = ljedits)
return parameters
# def read_frcmod(self, infile, verbose=1, debug=0, **kwargs):
# """
# Arguments:
# infile (str): Path to input lib file
# verbose (int): Enable verbose output
# debug (int): Enable debug output
# kwargs (dict): Additional keyword arguments
# """
# if verbose >= 1:
# print("READING FRCMOD: {0}".format(infile))
#
# are = self.amber_regex
# strip_dict = self.strip_dict
# re_blank = are("^\s*$")
#
# section = 1
# with open(infile, "r") as open_infile:
# line = open_infile.readline()
# while line:
# # BLANK
# if re.match(re_blank, line):
# if verbose >= 1:
# print("BLANK |{0}".format(line.strip()))
# # 1: TITLE
# elif section <= 1 and not re.match(re_mass, line):
# if verbose >= 1:
# print("TITLE |{0}".format(line.strip()))
# # 2: MASS
# elif section <= 2 and re.match(re_mass, line):
# section = 2
# if verbose >= 1:
# print("MASS |{0}".format(line.strip()))
# fields = strip_dict(re.match(re_mass, line).groupdict())
# mass_types = mass_types.append(fields, ignore_index=True)
# # 3: HYDROPHOBIC (list of types)
# elif section <= 3 and re.match(re_atomlist, line):
# section = 3
# if verbose >= 1:
# print("HYDROPHOBIC |{0}".format(line.rstrip()))
# fields = [{"type": v} for v in are("{t}").findall(line)]
# hydrophobic_types = hydrophobic_types.append(fields,
# ignore_index=True)
# # 4: BOND
# elif section <= 4 and re.match(re_bond, line):
# section = 4
# if verbose >= 1:
# print("BOND |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_bond, line).groupdict())
# bonds = bonds.append(fields, ignore_index=True)
# # 5: ANGLE
# elif section <= 5 and re.match(re_angle, line):
# section = 5
# if verbose >= 1:
# print("ANGLE |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_angle, line).groupdict())
# angles = angles.append(fields, ignore_index=True)
# # 6: DIHEDRAL
# elif section <= 6 and re.match(re_dihedral, line):
# section = 6
# if verbose >= 1:
# print("DIHEDRAL |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_dihedral,
# line).groupdict())
# dihedrals = dihedrals.append(fields, ignore_index=True)
# # 7: IMPROPER
# elif section <= 7 and re.match(re_improper, line):
# section = 7
# if verbose >= 1:
# print("IMPROPER |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_improper,
# line).groupdict())
# impropers = impropers.append(fields, ignore_index=True)
# # 8: HBOND
# elif section <= 8 and re.match(re_hbond, line):
# section = 8
# if verbose >= 1:
# print("HBOND |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_hbond, line).groupdict())
# hbonds = hbonds.append(fields, ignore_index=True)
# # 9: VDW (equivalent types)
# elif section <= 9 and re.match(re_atomlist, line):
# section = 9
# if verbose >= 1:
# print("VDW EQUIVALENT |{0}".format(line.rstrip()))
# fields = [{"type_{0}".format(i): v for i, v in
# enumerate(re.compile(are("{t}")).findall(line))}]
# vdw_eq_types = vdw_eq_types.append(fields,
# ignore_index=True)
# # 10: VDW (format)
# elif section <= 10.3 and re.match(re_vdw_format, line):
# if verbose >= 1:
# print("VDW FORMAT |{0}".format(line.rstrip()))
# # 10.2: VDW (radius and well depth)
# elif section <= 10.2 and re.match(re_vdw, line):
# section = 10.2
# if verbose >= 1:
# print("VDW |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_vdw, line).groupdict())
# vdw_types = vdw_types.append(fields, ignore_index=True)
# # 11: LJEDIT (title)
# elif (section <= 11 and re.match(re_ljedit_title, line)):
# section = 11
# if verbose >= 1:
# print("LJEDIT |{0}".format(line.rstrip()))
# # 11.1: LJEDIT (atom types, radii, and well depth)
# elif (section <= 11.1 and re.match(re_ljedit, line)):
# section = 11.1
# if verbose >= 1:
# print("LJEDIT |{0}".format(line.rstrip()))
# fields = strip_dict(re.match(re_ljedit, line).groupdict())
# ljedits = ljedits.append(fields, ignore_index=True)
# # END
# elif re.match(re_end, line):
# if verbose >= 1:
# print("END |{0}".format(line.rstrip()))
# break
# # NO MATCH
# else:
# if verbose >= 1:
# print("NOMATCH |{0}".format(line.rstrip()))
# line = open_infile.readline()
# def read_lib(self, infile, verbose=1, debug=0, **kwargs):
# """
# Arguments:
# infile (str): Path to input lib file
# verbose (int): Enable verbose output
# debug (int): Enable debug output
# kwargs (dict): Additional keyword arguments
# """
# if verbose >= 1:
# print("READING LIB: {0}".format(infile))
#
# stripd = self.strip_dict
# re_blank = self.lib_re["blank"]
# re_atoms = self.lib_re["atoms"]
# re_atom_edits = self.lib_re["atom_edits"]
# re_box = self.lib_re["box"]
# re_res_seq = self.lib_re["res_seq"]
# re_res_connect = self.lib_re["res_connect"]
# re_bonds = self.lib_re["bonds"]
# re_hierarchy = self.lib_re["hierarchy"]
# re_name = self.lib_re["name"]
# re_coordinates = self.lib_re["coordinates"]
# re_res_connect2 = self.lib_re["res_connect2"]
# re_residues = self.lib_re["residues"]
# re_pdb_seq = self.lib_re["pdb_seq"]
# re_solventcap = self.lib_re["solventcap"]
# re_velocities = self.lib_re["velocities"]
#
# # Regular expressions for titles
# re_t_atoms = self.amber_regex("atoms", title=True)
# re_t_atom_edits = self.amber_regex("atomspertinfo", title=True)
# re_t_box = self.amber_regex("boundbox", title=True)
# re_t_res_seq = self.amber_regex("childsequence", title=True)
# re_t_res_connect = self.amber_regex("connect", title=True)
# re_t_bonds = self.amber_regex("connectivity", title=True)
# re_t_hierarchy = self.amber_regex("hierarchy", title=True)
# re_t_name = self.amber_regex("name", title=True)
# re_t_coordinates = self.amber_regex("positions", title=True)
# re_t_res_connect2 = self.amber_regex("residueconnect", title=True)
# re_t_residues = self.amber_regex("residues", title=True)
# re_t_pdb_seq = self.amber_regex("residuesPdbSequenceNumber",
# title=True)
# re_t_solventcap = self.amber_regex("solventcap", title=True)
# re_t_velocities = self.amber_regex("velocities", title=True)
#
# # Regular expressions for contents
# section = 0
# residue = None
#
# with open(infile, "r") as open_infile:
# line = open_infile.readline()
# while line:
# # BLANK
# if re.match(re_blank, line):
# if verbose >= 1:
# print("BLANK |{0}".format(line.strip()))
# # 1: ATOMS
# elif re.match(re_t_atoms, line):
# if verbose >= 1:
# print("ATOMS |{0}".format(line.strip()))
# section = 1
# fields = stripd(re.match(re_t_atoms, line).groupdict())
# residue = self.residues[fields["residue_name"]] = {}
# residue["atoms"] = pd.DataFrame(columns=
# ["name", "type", "type_index", "residue_index", "flags",
# "atom_index", "element", "charge", "note"])
# elif section == 1 and re.match(re_atoms, line):
# if verbose >= 1:
# print("ATOMS |{0}".format(line.strip()))
# fields = stripd(re.match(re_atoms, line).groupdict())
# residue["atoms"] = residue["atoms"].append(
# fields, ignore_index=True)
# # 2: ATOMSPERTINFO
# elif re.match(re_t_atom_edits, line):
# if verbose >= 1:
# print("ATOMSPERTINFO |{0}".format(line.strip()))
# section = 2
# residue["atom_edits"] = pd.DataFrame(columns=
# ["name", "type", "type_index", "element", "charge",
# "note"])
# elif section == 2 and re.match(re_atom_edits, line):
# if verbose >= 1:
# print("ATOMSPERTINFO |{0}".format(line.strip()))
# fields = stripd(re.match(re_atom_edits, line).groupdict())
# residue["atom_edits"] = residue["atom_edits"].append(
# fields, ignore_index=True)
# # 3: BOUNDBOX
# elif re.match(re_t_box, line):
# if verbose >= 1:
# print("BOUNDBOX |{0}".format(line.strip()))
# section = 3
# box_keys = ["box", "angle", "x_length", "y_length",
# "z_length"]
# box_items = []
# elif section == 3 and re.match(re_box, line):
# if verbose >= 1:
# print("BOUNDBOX |{0}".format(line.strip()))
# fields = stripd(re.match(re_box, line).groupdict())
# box_items.append(
# (box_keys.pop(0), [fields["box"]]))
# if len(box_keys) == 0:
# residue["box"] = pd.DataFrame.from_items(box_items)
# # 4: CHILDSEQUENCE
# elif re.match(re_t_res_seq, line):
# if verbose >= 1:
# print("CHILDSEQUENCE |{0}".format(line.strip()))
# section = 4
# residue["res_seq"] = pd.DataFrame(columns=
# ["childsequence", "note"])
# elif section == 4 and re.match(re_res_seq, line):
# if verbose >= 1:
# print("CHILDSEQUENCE |{0}".format(line.strip()))
# fields = stripd(re.match(re_res_seq, line).groupdict())
# residue["res_seq"] = residue["res_seq"].append(
# fields, ignore_index=True)
# # 5: CONNECT
# elif re.match(re_t_res_connect, line):
# if verbose >= 1:
# print("CONNECT |{0}".format(line.strip()))
# section = 5
# connect_keys = [
# "connect_atom_index_1", "connect_atom_index_2", "note"]
# connect_items = []
# elif section == 5 and re.match(re_res_connect, line):
# if verbose >= 1:
# print("CONNECT |{0}".format(line.strip()))
# fields = stripd(re.match(re_res_connect, line).groupdict())
# connect_items.append(
# (connect_keys.pop(0), [fields["connect"]]))
# if len(connect_keys) == 0:
# residue["res_connect"] = pd.DataFrame.from_items(
# connect_items)
# # 6: CONNECTIVITY
# elif re.match(re_t_bonds, line):
# if verbose >= 1:
# print("CONNECTIVITY |{0}".format(line.strip()))
# section = 6
# residue["bonds"] = pd.DataFrame(columns=
# ["atom_index_1", "atom_index_2", "flag", "note"])
# elif section == 6 and re.match(re_bonds, line):
# if verbose >= 1:
# print("CONNECTIVITY |{0}".format(line.strip()))
# fields = stripd(re.match(re_bonds,
# line).groupdict())
# residue["bonds"] = residue["bonds"].append(
# fields, ignore_index=True)
# # 7: HIERARCHY
# elif re.match(re_t_hierarchy, line):
# if verbose >= 1:
# print("HIERARCHY |{0}".format(line.strip()))
# section = 7
# residue["hierarchy"] = pd.DataFrame(columns=
# ["above_type", "above_index", "below_type",
# "below_index", "note"])
# elif section == 7 and re.match(re_hierarchy, line):
# if verbose >= 1:
# print("HIERARCHY |{0}".format(line.strip()))
# fields = stripd(re.match(re_hierarchy,
# line).groupdict())
# residue["hierarchy"] = residue["hierarchy"].append(
# fields, ignore_index=True)
# # 8: NAME
# elif re.match(re_t_name, line):
# if verbose >= 1:
# print("NAME |{0}".format(line.strip()))
# section = 8
# residue["name"] = pd.DataFrame(columns=
# ["childsequence", "note"])
# elif section == 8 and re.match(re_name, line):
# if verbose >= 1:
# print("NAME |{0}".format(line.strip()))
# fields = stripd(re.match(re_name, line).groupdict())
# residue["name"] = residue["name"].append(
# fields, ignore_index=True)
# # 9: POSITIONS
# elif re.match(re_t_coordinates, line):
# if verbose >= 1:
# print("POSITIONS |{0}".format(line.strip()))
# section = 9
# residue["coordinates"] = pd.DataFrame(columns=
# ["x", "y", "z", "note"])
# elif section == 9 and re.match(re_coordinates, line):
# if verbose >= 1:
# print("POSITIONS |{0}".format(line.strip()))
# fields = stripd(re.match(re_coordinates,
# line).groupdict())
# residue["coordinates"] = residue["coordinates"].append(
# fields, ignore_index=True)
# # 10: RESIDUECONNECT
# elif re.match(re_t_res_connect2, line):
# if verbose >= 1:
# print("RESIDUECONNECT |{0}".format(line.strip()))
# section = 10
# residue["res_connect2"] = pd.DataFrame(columns=
# ["atom_index_1", "atom_index_2", "atom_index_3",
# "atom_index_4", "atom_index_5", "atom_index_6", "note"])
# elif section == 10 and re.match(re_res_connect2, line):
# if verbose >= 1:
# print("RESIDUECONNECT |{0}".format(line.strip()))
# fields = stripd(re.match(re_res_connect2,
# line).groupdict())
# residue["res_connect2"] = residue["res_connect2"].append(
# fields, ignore_index=True)
# # 11: RESIDUES
# elif re.match(re_t_residues, line):
# if verbose >= 1:
# print("RESIDUES |{0}".format(line.strip()))
# section = 11
# residue["residues"] = pd.DataFrame(columns=
# ["name", "residue_index", "child_atom_index",
# "start_atom_index", "residue_type", "note"])
# elif re.match(re_residues, line):
# if verbose >= 1:
# print("RESIDUES |{0}".format(line.strip()))
# fields = stripd(re.match(re_residues,
# line).groupdict())
# residue["residues"] = residue["residues"].append(
# fields, ignore_index=True)
# # 12: RESIDUESPDBSEQUENCENUMBER
# elif re.match(re_t_pdb_seq, line):
# if verbose >= 1:
# print("PDBSEQUENCENUM |{0}".format(line.strip()))
# section = 12
# residue["pdb_seq"] = pd.DataFrame(columns=
# ["residue_index", "note"])
# elif section == 12 and re.match(re_pdb_seq, line):
# if verbose >= 1:
# print("PDBSEQUENCENUM |{0}".format(line.strip()))
# fields = stripd(re.match(re_pdb_seq, line).groupdict())
# residue["pdb_seq"] = residue["pdb_seq"].append(
# fields, ignore_index=True)
# # 13: SOLVENTCAP
# elif re.match(re_t_solventcap, line):
# if verbose >= 1:
# print("SOLVENTCAP |{0}".format(line.strip()))
# section = 13
# solventcap_keys = ["solventcap", "angle", "x_length",
# "y_length", "z_length"]
# solventcap_temp = []
# elif section == 13 and re.match(re_solventcap, line):
# if verbose >= 1:
# print("SOLVENTCAP |{0}".format(line.strip()))
# fields = stripd(re.match(re_solventcap, line).groupdict())
# solventcap_temp.append(
# (solventcap_keys.pop(0), [fields["solventcap"]]))
# if len(solventcap_keys) == 0:
# residue["solventcap"] = pd.DataFrame.from_items(
# solventcap_temp)
# # 14: VELOCITIES
# elif re.match(re_t_velocities, line):
# if verbose >= 1:
# print("VELOCITIES |{0}".format(line.strip()))
# section = 14
# residue["velocities"] = pd.DataFrame(columns=
# ["x", "y", "z", "note"])
# elif section == 14 and re.match(re_velocities, line):
# if verbose >= 1:
# print("VELOCITIES |{0}".format(line.strip()))
# fields = stripd(re.match(re_velocities,
# line).groupdict())
# residue["velocities"] = residue["velocities"].append(
# fields, ignore_index=True)
# # NO MATCH
# else:
# if verbose >= 1:
# print("NOMATCH |{0}".format(line.rstrip()))
# line = open_infile.readline()
# for name in sorted(self.residues):
# residue = self.residues[name]
# print()
# print(name)
# fields = ["atoms", "atom_edits", "box", "childsequence",
# "connect", "bonds", "hierarchy", "name",
# "coordinates", "residueconnect", "residues",
# "pdbindex", "solventcap", "velocities"]
# for field in fields:
# if field in residue:
# print(field)
# print(residue[field])
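# Minimal usage sketch (added for illustration; the parm file path is hypothetical
# and this block is not part of the original module):
#
# ff = AmberForceField(parm="/path/to/parm99.dat", verbose=0)
# print(ff.parameters["bonds"].head())
# print(ff.parameters["dihedrals"].head())
#
# read_parm() returns a dict of pandas DataFrames (mass_types, bonds, angles,
# dihedrals, impropers, hbonds, vdw_types, ljedits, ...), so each force field
# section can be inspected or filtered with ordinary DataFrame operations.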
| bsd-3-clause |
jjx02230808/project0223 | examples/decomposition/plot_sparse_coding.py | 12 | 4007 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which
motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
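# Reference note (added; not in the original example): the Ricker ("Mexican hat")
# wavelet of width a, i.e. the negative, normalized second derivative of a Gaussian, is
#     psi(x) = 2 / (sqrt(3 * a) * pi ** 0.25) * (1 - (x / a) ** 2) * exp(-x ** 2 / (2 * a ** 2))
# ricker_function() below evaluates a shifted, discretely sampled version of this
# expression, and ricker_matrix() stacks such atoms, normalized to unit l2 norm,
# into a dictionary.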
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling  # integer division; used below as an array dimension
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
cybernet14/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
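# Reference note (added; not in the original example): the RBF kernel between two
# samples x and x' is
#     K(x, x') = exp(-gamma * ||x - x'||**2)
# so a larger gamma shrinks each support vector's radius of influence, while C
# scales the penalty on training-set margin violations.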
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap, which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
anselmobd/fo2 | script/mails.py | 1 | 1576 | import pandas as pd
mails = pd.read_csv('../clientes.csv', sep=";", nrows=5)
print(mails.head())
col1 = mails[['NOME_CLIENTE', 'E_MAIL']]
print(col1)
col2 = mails[['NOME_CLIENTE', 'NFE_E_MAIL']]
col2 = col2.rename(columns={"NFE_E_MAIL": "E_MAIL"})
print(col2)
col = pd.concat([col1, col2], ignore_index=True)
print(col)
# print('sort_values')
# col = col.sort_values(['NOME_CLIENTE', 'E_MAIL'])
# print(col)
print('drop_duplicates')
col = col.drop_duplicates(subset=None, keep='first')
print(col)
col = col.reset_index()
print(col)
col["E_MAIL"] = col["E_MAIL"].str.split(",")
print(col)
col = col.apply(pd.Series.explode)
print(col)
col["E_MAIL"] = col["E_MAIL"].str.strip()
print(col)
print('drop_duplicates')
col = col.drop_duplicates(subset=None, keep='first')
print(col)
col = col.reset_index()
print(col)
col["E_MAIL"] = col["E_MAIL"].str.split(";")
print(col)
col = col.apply(pd.Series.explode)
print(col)
col["E_MAIL"] = col["E_MAIL"].str.strip()
print(col)
print('drop_duplicates')
col = col.drop_duplicates(subset=None, keep='first')
print(col)
# # col = col.set_index(['NOME_CLIENTE'])
# # print(col)
# col = col.stack()
# print(col)
# col = col.str.split(',', expand=True)
# print(col)
# col = col.stack()
# print(col)
# col = col.unstack(-2)
# print(col)
# col = col.reset_index(-1, drop=True)
# print(col)
# col = col.reset_index()
# print(col)
# (col.set_index(['NOME_CLIENTE'])
# .stack()
# .str.split(',', expand=True)
# .stack()
# .unstack(-2)
# .reset_index(-1, drop=True)
# .reset_index()
# )
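# Summary sketch (added comment; not part of the original script): the steps above
# build one (NOME_CLIENTE, E_MAIL) pair per address by stacking the two e-mail
# columns, splitting on "," and ";", exploding, stripping whitespace and dropping
# duplicates. A compact equivalent (pandas >= 0.25), assuming the same col1/col2 frames:
#
# col = pd.concat([col1, col2], ignore_index=True)
# col["E_MAIL"] = col["E_MAIL"].str.split(r"[,;]")
# col = col.explode("E_MAIL")
# col["E_MAIL"] = col["E_MAIL"].str.strip()
# col = col.drop_duplicates().reset_index(drop=True)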
| mit |
rew4332/tensorflow | tensorflow/contrib/learn/python/learn/tests/base_test.py | 1 | 11936 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators import base
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import log_loss
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
# TODO(b/29580537): Remove when we deprecate feature column inference.
class InferredfeatureColumnTest(tf.test.TestCase):
"""Test base estimators."""
def testOneDim(self):
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
regressor = learn.TensorFlowLinearRegressor()
regressor.fit(x, y)
score = mean_squared_error(y, regressor.predict(x))
self.assertLess(score, 1.0, "Failed with score = {0}".format(score))
def testIris(self):
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(iris.data, [x for x in iris.target])
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
def testIrisClassWeight(self):
iris = datasets.load_iris()
# Note, class_weight are not supported anymore :( Use weight_column.
with self.assertRaises(ValueError):
classifier = learn.TensorFlowLinearClassifier(
n_classes=3, class_weight=[0.1, 0.8, 0.1])
classifier.fit(iris.data, iris.target)
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertLess(score, 0.7, "Failed with score = {0}".format(score))
def testIrisAllVariables(self):
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(iris.data, [x for x in iris.target])
self.assertEqual(
classifier.get_variable_names(),
["centered_bias_weight",
"centered_bias_weight/Adagrad",
"global_step",
# Double slashes appear because the column name is empty. If it was not
# empty, the variable names would be "linear/column_name/_weight" etc.
"linear//_weight",
"linear//_weight/Ftrl",
"linear//_weight/Ftrl_1",
"linear/bias_weight",
"linear/bias_weight/Ftrl",
"linear/bias_weight/Ftrl_1"])
def testIrisSummaries(self):
iris = datasets.load_iris()
output_dir = tempfile.mkdtemp() + "learn_tests/"
classifier = learn.TensorFlowLinearClassifier(n_classes=3,
model_dir=output_dir)
classifier.fit(iris.data, iris.target)
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
# TODO(ipolosukhin): Check that summaries are correctly written.
def testIrisContinueTraining(self):
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(n_classes=3,
learning_rate=0.01,
continue_training=True,
steps=250)
classifier.fit(iris.data, iris.target)
score1 = accuracy_score(iris.target, classifier.predict(iris.data))
classifier.fit(iris.data, iris.target, steps=500)
score2 = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(
score2, score1,
"Failed with score2 {0} <= score1 {1}".format(score2, score1))
def testIrisStreaming(self):
iris = datasets.load_iris()
def iris_data():
while True:
for x in iris.data:
yield x
def iris_predict_data():
for x in iris.data:
yield x
def iris_target():
while True:
for y in iris.target:
yield y
classifier = learn.TensorFlowLinearClassifier(n_classes=3, steps=100)
classifier.fit(iris_data(), iris_target())
score1 = accuracy_score(iris.target, classifier.predict(iris.data))
score2 = accuracy_score(iris.target,
classifier.predict(iris_predict_data()))
self.assertGreater(score1, 0.5, "Failed with score = {0}".format(score1))
self.assertEqual(score2, score1, "Scores from {0} iterator doesn't "
"match score {1} from full "
"data.".format(score2, score1))
def testIris_proba(self):
# If sklearn available.
if log_loss:
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowClassifier(n_classes=3, steps=250)
classifier.fit(iris.data, iris.target)
score = log_loss(iris.target, classifier.predict_proba(iris.data))
self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
def testBoston(self):
random.seed(42)
boston = datasets.load_boston()
regressor = learn.TensorFlowLinearRegressor(batch_size=boston.data.shape[0],
steps=500,
learning_rate=0.001)
regressor.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, regressor.predict(boston.data))
self.assertLess(score, 150, "Failed with score = {0}".format(score))
class BaseTest(tf.test.TestCase):
"""Test base estimators."""
def testOneDim(self):
random.seed(42)
x = np.random.rand(1000)
y = 2 * x + 3
feature_columns = learn.infer_real_valued_columns_from_input(x)
regressor = learn.TensorFlowLinearRegressor(feature_columns=feature_columns)
regressor.fit(x, y)
score = mean_squared_error(y, regressor.predict(x))
self.assertLess(score, 1.0, "Failed with score = {0}".format(score))
def testIris(self):
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, [x for x in iris.target])
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
def testIrisClassWeight(self):
iris = datasets.load_iris()
# Note, class_weight are not supported anymore :( Use weight_column.
with self.assertRaises(ValueError):
classifier = learn.TensorFlowLinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3, class_weight=[0.1, 0.8, 0.1])
classifier.fit(iris.data, iris.target)
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertLess(score, 0.7, "Failed with score = {0}".format(score))
def testIrisAllVariables(self):
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3)
classifier.fit(iris.data, [x for x in iris.target])
self.assertEqual(
classifier.get_variable_names(),
["centered_bias_weight",
"centered_bias_weight/Adagrad",
"global_step",
# Double slashes appear because the column name is empty. If it was not
# empty, the variable names would be "linear/column_name/_weight" etc.
"linear//_weight",
"linear//_weight/Ftrl",
"linear//_weight/Ftrl_1",
"linear/bias_weight",
"linear/bias_weight/Ftrl",
"linear/bias_weight/Ftrl_1"])
def testIrisSummaries(self):
iris = datasets.load_iris()
output_dir = tempfile.mkdtemp() + "learn_tests/"
classifier = learn.TensorFlowLinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3, model_dir=output_dir)
classifier.fit(iris.data, iris.target)
score = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
# TODO(ipolosukhin): Check that summaries are correctly written.
def testIrisContinueTraining(self):
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3,
learning_rate=0.01,
continue_training=True,
steps=250)
classifier.fit(iris.data, iris.target)
score1 = accuracy_score(iris.target, classifier.predict(iris.data))
classifier.fit(iris.data, iris.target, steps=500)
score2 = accuracy_score(iris.target, classifier.predict(iris.data))
self.assertGreater(
score2, score1,
"Failed with score2 {0} <= score1 {1}".format(score2, score1))
def testIrisStreaming(self):
iris = datasets.load_iris()
def iris_data():
while True:
for x in iris.data:
yield x
def iris_predict_data():
for x in iris.data:
yield x
def iris_target():
while True:
for y in iris.target:
yield y
classifier = learn.TensorFlowLinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3, steps=100)
classifier.fit(iris_data(), iris_target())
score1 = accuracy_score(iris.target, classifier.predict(iris.data))
score2 = accuracy_score(iris.target,
classifier.predict(iris_predict_data()))
self.assertGreater(score1, 0.5, "Failed with score = {0}".format(score1))
self.assertEqual(score2, score1, "Scores from {0} iterator doesn't "
"match score {1} from full "
"data.".format(score2, score1))
def testIris_proba(self):
# If sklearn available.
if log_loss:
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(iris.data),
n_classes=3, steps=250)
classifier.fit(iris.data, iris.target)
score = log_loss(iris.target, classifier.predict_proba(iris.data))
self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
def testBoston(self):
random.seed(42)
boston = datasets.load_boston()
regressor = learn.TensorFlowLinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(boston.data),
batch_size=boston.data.shape[0],
steps=500,
learning_rate=0.001)
regressor.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, regressor.predict(boston.data))
self.assertLess(score, 150, "Failed with score = {0}".format(score))
def testUnfitted(self):
estimator = learn.TensorFlowEstimator(model_fn=None, n_classes=1)
with self.assertRaises(base.NotFittedError):
estimator.predict([1, 2, 3])
with self.assertRaises(base.NotFittedError):
estimator.save("/tmp/path")
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
ekansa/open-context-py | opencontext_py/apps/utilities/one-off-processes-two.py | 1 | 111312 | """
One off processing scripts to handle edge cases, cleanup, and straggler data
"""
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.ldata.linkentities.models import LinkEntity
from opencontext_py.apps.ldata.linkannotations.models import LinkAnnotation
from opencontext_py.apps.ocitems.identifiers.ezid.ezid import EZID
from opencontext_py.apps.ocitems.strings.manage import StringManagement
from opencontext_py.apps.ocitems.assertions.models import Assertion
ezid = EZID()
# ezid.ark_shoulder = EZID.ARK_TEST_SHOULDER
source_id = 'ref:2348747658045'
pred_uuid = '74b9bacf-e5e8-4f3a-b43d-18bab4b2d635'
project_uuid = '141e814a-ba2d-4560-879f-80f1afb019e9'
pdf_base_link = 'https://archive.org/download/ArchaeologyOfAnImage/Archaeology-of-an-Image.pdf'
pdf_page_offset = 0
page_link_dict = {}
uuid_pages = {}
imp_uuid_cells = ImportCell.objects.filter(source_id=source_id, field_num=5)
for imp_uuid in imp_uuid_cells:
uuid = imp_uuid.record
man_objs = Manifest.objects.filter(uuid=uuid)[:1]
if len(man_objs) > 0:
man_obj = man_objs[0]
# get pages
imp_page_cells = ImportCell.objects.filter(source_id=source_id, field_num=3, row_num=imp_uuid.row_num)[:1]
imp_link_cells = ImportCell.objects.filter(source_id=source_id, field_num=6, row_num=imp_uuid.row_num)[:1]
imp_link = imp_link_cells[0]
page_str = imp_page_cells[0].record
page_ex = page_str.split(',')
page_links = []
for page in page_ex:
page = page.strip()
page_num = None
ark_uri = None
try:
page_num = int(float(page))
except:
page_num = None
if len(page) > 0 and isinstance(page_num, int):
if uuid not in uuid_pages:
uuid_pages[uuid] = []
if page_num not in uuid_pages[uuid]:
uuid_pages[uuid].append(page_num)
pdf_page = pdf_page_offset + page_num
pdf_link = pdf_base_link + '#page=' + str(pdf_page)
if pdf_link not in page_link_dict:
page_link_dict[pdf_link] = {
'ark_uri': None,
'uuids': []
}
meta = {
'erc.who': 'Mark Lehner',
'erc.what': 'The Archaeology of an Image: The Great Sphinx of Giza (Page: ' + page + ')',
'erc.when': 1991
}
url = pdf_link
ark_id = ezid.mint_identifier(url, meta, 'ark')
if isinstance(ark_id, str):
ark_uri = 'https://n2t.net/' + ark_id
page_link_dict[pdf_link]['ark_uri'] = ark_uri
else:
ark_uri = page_link_dict[pdf_link]['ark_uri']
if uuid not in page_link_dict[pdf_link]['uuids']:
page_link_dict[pdf_link]['uuids'].append(uuid)
if isinstance(ark_uri, str):
print('Page: ' + page + ' at: ' + ark_uri + ' to: ' + pdf_link)
a_link = '<a href="' + ark_uri + '" target="_blank" title="Jump to page ' + page + ' in the dissertation">' + page + '</a>'
if a_link not in page_links:
page_links.append(a_link)
all_pages = ', '.join(page_links)
imp_link.record = all_pages
imp_link.save()
imp_link_cells = ImportCell.objects.filter(source_id=source_id, field_num=6, row_num=imp_uuid.row_num)[:1]
all_pages = imp_link_cells[0].record
page_notes = '<div><p>Associated pages:</p> <p>' + all_pages + '</p></div>'
str_m = StringManagement()
str_m.project_uuid = man_obj.project_uuid
str_m.source_id = source_id
str_obj = str_m.get_make_string(page_notes)
Assertion.objects.filter(uuid=man_obj.uuid, predicate_uuid=pred_uuid).delete()
new_ass = Assertion()
new_ass.uuid = man_obj.uuid
new_ass.subject_type = man_obj.item_type
new_ass.project_uuid = man_obj.project_uuid
new_ass.source_id = source_id
new_ass.obs_node = '#obs-1'
new_ass.obs_num = 1
new_ass.sort = 50
new_ass.visibility = 1
new_ass.predicate_uuid = pred_uuid  # predicate for the associated-pages note
new_ass.object_type = 'xsd:string'
new_ass.object_uuid = str_obj.uuid  # uuid of the note string
try:
new_ass.save()
except:
pass
# Makes a note that a cataloged item was described by a specialist
# Makes cataloging descriptions appear in the 2nd observation tab
from opencontext_py.apps.ocitems.strings.manage import StringManagement
from opencontext_py.apps.ocitems.obsmetadata.models import ObsMetadata
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.entities.entity.models import Entity
has_sp_des_pred = '7c053560-2385-43af-af11-6e58afdbeb10'
sp_note_pred = 'b019efa8-c67a-4641-9340-b667ab73d498'
sp_pred_asses = Assertion.objects.filter(predicate_uuid=has_sp_des_pred)
change_obj_types = [
"types",
"xsd:integer",
"xsd:double",
"xsd:date",
"xsd:string",
"complex-description",
"xsd:boolean"
]
class_lookups = {
'oc-gen:cat-human-bone': 'Suellen Gauld (Bioarchaeology / Human Remains)',
'oc-gen:cat-animal-bone': 'Sarah Whitcher Kansa (Zooarchaeology / Animal Remains)'
}
change_source_ids = []
uuid_entities = {}
for ass in sp_pred_asses:
uuid = ass.uuid
if uuid not in uuid_entities:
uuid_entities[uuid] = []
entity = Entity()
found = entity.dereference(ass.object_uuid)
print('found a ' + entity.class_uri)
uuid_entities[uuid].append(entity)
for ass in sp_pred_asses:
uuid = ass.uuid
print('Update: ' + uuid)
note = '<div>'
note += '<p><strong>Catalog Record with Specialist Descriptions</strong></p>'
note += '<p>This catalog record has additional descriptive information provided by one or more '
note += 'specialized researchers. Specialist provided descriptions should be regarded as more '
note += 'authoritative.</p>'
note += '<br/>'
note += '<p>Links to Specialist Records:</p>'
note += '<ul class="list-unstyled">'
for entity in uuid_entities[uuid]:
note += '<li>'
note += '<a target="_blank" href="../../subjects/' + entity.uuid + '">' + entity.label + '</a>'
note += '; described by '
note += class_lookups[entity.class_uri]
note += '</li>'
note += '</ul>'
note += '</div>'
str_m = StringManagement()
str_m.project_uuid = ass.project_uuid
str_m.source_id = 'catalog-specialist-note'
str_obj = str_m.get_make_string(note)
new_ass = Assertion()
new_ass.uuid = uuid
new_ass.subject_type = ass.subject_type
new_ass.project_uuid = ass.project_uuid
new_ass.source_id = 'catalog-specialist-note'
new_ass.obs_node = '#obs-1'
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = sp_note_pred  # predicate for the specialist-description note
new_ass.object_type = 'xsd:string'
new_ass.object_uuid = str_obj.uuid  # uuid of the note string
try:
new_ass.save()
except:
pass
change_asses = Assertion.objects\
.filter(uuid=uuid,
obs_num=1,
object_type__in=change_obj_types)\
.exclude(predicate_uuid=sp_note_pred)\
.exclude(source_id__startswith='sec-')\
.exclude(source_id='catalog-specialist-note')\
.exclude(visibility=0)
for change_ass in change_asses:
new_change_ass = change_ass
change_ass.visibility = 0
change_ass.save()
new_change_ass.hash_id = None
new_change_ass.visibility = 0
new_source_id = 'sec-' + change_ass.source_id
new_change_ass.source_id = new_source_id
new_change_ass.obs_node = '#obs-2'
new_change_ass.obs_num = 2
try:
new_change_ass.save()
except:
pass
if new_source_id not in change_source_ids:
# make new source metadata
ometa = ObsMetadata()
ometa.source_id = new_source_id
ometa.project_uuid = ass.project_uuid
ometa.obs_num = 2
ometa.label = 'Non-Specialist Description'
ometa.obs_type = 'oc-gen:primary'
ometa.note = 'From cataloging'
try:
ometa.save()
except:
pass
change_source_ids.append(new_source_id)
from opencontext_py.apps.ocitems.assertions.models import Assertion
pred_uuid = '59415979-72f8-4558-9e74-052fae4eed07'
asses = Assertion.objects.filter(predicate_uuid=pred_uuid)
for ass in asses:
asses_check = Assertion.objects.filter(uuid=ass.uuid,
predicate_uuid=pred_uuid)
if len(asses_check) > 1:
all_item_count = 0
print('Multiple counts for: ' + ass.uuid + ' source: ' + ass.source_id)
for item_ass in asses_check:
new_source_id = item_ass.source_id + '-fix'
try:
item_count = int(float(item_ass.data_num))
except:
item_count = 0
print('Item count: ' + str(item_count))
all_item_count += item_count
if all_item_count > 0:
new_ass = asses_check[0]
new_ass.hash_id = None
new_source_id = new_ass.source_id + '-fix'
new_ass.source_id = new_source_id
new_ass.data_num = all_item_count
new_ass.save()
bad_ass = Assertion.objects\
.filter(uuid=ass.uuid,
predicate_uuid=pred_uuid)\
.exclude(source_id=new_source_id)\
.delete()
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ldata.linkentities.models import LinkEntity
from opencontext_py.apps.ldata.linkannotations.models import LinkAnnotation
from opencontext_py.apps.ocitems.subjects.models import Subject
ca_subjects = Subject.objects.filter(context__startswith='United States/California',
project_uuid='416A274C-CF88-4471-3E31-93DB825E9E4A')
pred_uri = 'dc-terms:isReferencedBy'
hearst_uri = 'http://hearstmuseum.berkeley.edu'
for ca_subj in ca_subjects:
ok_mans = Manifest.objects.filter(uuid=ca_subj.uuid,
class_uri='oc-gen:cat-site')[:1]
annos = LinkAnnotation.objects.filter(subject=ca_subj.uuid,
predicate_uri=pred_uri,
object_uri=hearst_uri)[:1]
if len(ok_mans) > 0 and len(annos) < 1:
# we have a site in the manifest that has no links to the hearst
man_obj = ok_mans[0]
print('Relate Hearst to site: ' + man_obj.label)
la = LinkAnnotation()
la.subject = man_obj.uuid # the subordinate is the subject
la.subject_type = man_obj.item_type
la.project_uuid = man_obj.project_uuid
la.source_id = 'hearst-link'
la.predicate_uri = pred_uri
la.object_uri = hearst_uri
try:
la.save()
except:
pass
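# Backfill point geospatial records for site items that lack them, pulling latitude (field 11) and
# longitude (field 12) from the import table, then reindex the updated items in Solr.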
import json
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.subjects.models import Subject
from opencontext_py.apps.ocitems.geospace.models import Geospace
from opencontext_py.apps.indexer.reindex import SolrReIndex
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.imports.fieldannotations.models import ImportFieldAnnotation
source_id = 'ref:1990625792930'
project_uuid = '416A274C-CF88-4471-3E31-93DB825E9E4A'
uuids = []
man_objs = Manifest.objects.filter(project_uuid=project_uuid,
class_uri='oc-gen:cat-site')
for man_obj in man_objs:
geos = Geospace.objects.filter(uuid=man_obj.uuid)[:1]
if len(geos) < 1:
# no geospatial data
label_cells = ImportCell.objects.filter(source_id=source_id,
field_num=1,
record=man_obj.uuid)
for label_cell in label_cells:
lat = None
lon = None
row_num = label_cell.row_num
lat_cells = ImportCell.objects.filter(source_id=source_id,
field_num=11,
row_num=row_num)[:1]
lon_cells = ImportCell.objects.filter(source_id=source_id,
field_num=12,
row_num=row_num)[:1]
if len(lat_cells) > 0 and len(lon_cells) > 0:
try:
lat = float(lat_cells[0].record)
except:
lat = None
try:
lon = float(lon_cells[0].record)
except:
lon = None
if isinstance(lat, float) and isinstance(lon, float):
uuids.append(man_obj.uuid)
geo = Geospace()
geo.uuid = man_obj.uuid
geo.project_uuid = man_obj.project_uuid
geo.source_id = source_id + '-geofix'
geo.item_type = man_obj.item_type
geo.feature_id = 1
geo.meta_type = ImportFieldAnnotation.PRED_GEO_LOCATION
geo.ftype = 'Point'
geo.latitude = lat
geo.longitude = lon
geo.specificity = -11
# dump coordinates as json string in lon - lat (GeoJSON order)
geo.coordinates = json.dumps([lon, lat],
indent=4,
ensure_ascii=False)
try:
geo.save()
except:
print('Did not like ' + man_obj.label + ' uuid: ' + str(man_obj.uuid))
sri = SolrReIndex()
sri.reindex_uuids(uuids)
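# Create a 'Grid Coordinates' observation (obs 5) and move grid-coordinate assertions from this
# source into it for several artifact classes (the loop below repeats the initial cat-object call).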
from opencontext_py.apps.ocitems.obsmetadata.models import ObsMetadata
from opencontext_py.apps.ocitems.assertions.observations import AssertionObservations
ometa = ObsMetadata()
ometa.source_id = 'ref:1716440680966'
ometa.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
ometa.obs_num = 5
ometa.label = 'Grid Coordinates'
ometa.obs_type = 'oc-gen:primary'
ometa.note = 'X, Y, and sometimes Z spatial coordinates'
ometa.save()
class_uri = 'oc-gen:cat-object'
aos = AssertionObservations()
aos.change_obs_num_by_source_id(ometa.obs_num, ometa.source_id, class_uri)
class_uris = [
'oc-gen:cat-object',
'oc-gen:cat-arch-element',
'oc-gen:cat-glass',
'oc-gen:cat-pottery',
'oc-gen:cat-coin']
for class_uri in class_uris:
aos = AssertionObservations()
aos.change_obs_num_by_source_id(ometa.obs_num, ometa.source_id, class_uri)
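# Store Leaflet overlay bounds and a label in a media item's sup_json, and clear the project's
# existing geo-overlay assertion.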
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
media_uuid = '6c89e96d-d97e-4dba-acbe-e822fc1f87e7'
project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
media_man = Manifest.objects.get(uuid=media_uuid)
if not isinstance(media_man.sup_json, dict):
meta = LastUpdatedOrderedDict()
else:
meta = media_man.sup_json
meta['Leaflet'] = LastUpdatedOrderedDict()
meta['Leaflet']['bounds'] = [[43.153660, 11.402448],[43.152420, 11.400873]]
meta['Leaflet']['label'] = 'Orientalizing, Archaic Features'
media_man.sup_json = meta
media_man.save()
Assertion.objects\
.filter(uuid=project_uuid,
predicate_uuid=Assertion.PREDICATES_GEO_OVERLAY)\
.delete()
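# Register a media item as the geo-overlay for another project (source_id 'heit-el-ghurab-geo-overlay').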
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
project_uuid = '10aa84ad-c5de-4e79-89ce-d83b75ed72b5'
media_uuid = 'da676164-9829-4798-bb5d-c5b1135daa27'
media_man = Manifest.objects.get(uuid=media_uuid)
ass = Assertion()
ass.uuid = project_uuid
ass.subject_type = 'projects'
ass.project_uuid = project_uuid
ass.source_id = 'heit-el-ghurab-geo-overlay'
ass.obs_node = '#obs-' + str(1)
ass.obs_num = 1
ass.sort = 1
ass.visibility = 1
ass.predicate_uuid = Assertion.PREDICATES_GEO_OVERLAY
ass.object_uuid = media_man.uuid
ass.object_type = media_man.item_type
ass.save()
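# Mint and save EZID ARK identifiers for all manifest items from source ref:2181193573133.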
from opencontext_py.apps.ocitems.identifiers.ezid.manage import EZIDmanage
from opencontext_py.apps.ocitems.manifest.models import Manifest
mans = Manifest.objects.filter(source_id='ref:2181193573133')
ezid_m = EZIDmanage()
for man in mans:
ezid_m.make_save_ark_by_uuid(man.uuid)
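# Store Leaflet bounds and a label for the 'Heit el-Ghurab Areas' overlay image, then replace the
# project's geo-overlay assertion with one pointing at this media item.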
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
media_uuid = 'da676164-9829-4798-bb5d-c5b1135daa27'
project_uuid = '5A6DDB94-70BE-43B4-2D5D-35D983B21515'
media_man = Manifest.objects.get(uuid=media_uuid)
if not isinstance(media_man.sup_json, dict):
meta = LastUpdatedOrderedDict()
else:
meta = media_man.sup_json
meta['Leaflet'] = LastUpdatedOrderedDict()
meta['Leaflet']['bounds'] = [[29.9686630883, 31.1427860408999], [29.9723641789999, 31.1396409363999]]
meta['Leaflet']['label'] = 'Heit el-Ghurab Areas'
media_man.sup_json = meta
media_man.save()
Assertion.objects\
.filter(uuid=project_uuid,
predicate_uuid=Assertion.PREDICATES_GEO_OVERLAY)\
.delete()
ass = Assertion()
ass.uuid = project_uuid
ass.subject_type = 'projects'
ass.project_uuid = project_uuid
ass.source_id = 'test-geo-overlay'
ass.obs_node = '#obs-' + str(1)
ass.obs_num = 1
ass.sort = 1
ass.visibility = 1
ass.predicate_uuid = Assertion.PREDICATES_GEO_OVERLAY
ass.object_uuid = media_man.uuid
ass.object_type = media_man.item_type
ass.save()
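# Reindex every manifest item in this project in Solr, capping the geo facet zoom at 11.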
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.indexer.reindex import SolrReIndex
uuids = []
items = Manifest.objects.filter(project_uuid='416A274C-CF88-4471-3E31-93DB825E9E4A')
for item in items:
uuids.append(item.uuid)
print('Items to index: ' + str(len(uuids)))
sri = SolrReIndex()
sri.max_geo_zoom = 11
sri.reindex_uuids(uuids)
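# Check 'pc-iiif-backfill' media records for missing thumbnail/preview/fullfile entries and recreate
# them, either from the IIIF base URI (via the `types` list assumed to be defined earlier) or from an
# Internet Archive fullfile record.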
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.mediafiles.models import Mediafile
from opencontext_py.apps.indexer.reindex import SolrReIndex
project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
source_id = 'pc-iiif-backfill'
meds = Mediafile.objects.filter(project_uuid=project_uuid,
source_id=source_id)
req_types = ['oc-gen:thumbnail', 'oc-gen:preview', 'oc-gen:fullfile']
for media in meds:
for type in req_types:
media_ok = Mediafile.objects.filter(uuid=media.uuid, file_type=type)
if not media_ok:
print('Missing {} for {}'.format(type, media.uuid))
ia_fulls = Mediafile.objects.filter(uuid=media.uuid, file_type='oc-gen:ia-fullfile')[:1]
if ia_fulls:
    # only recreate the fullfile record when an Internet Archive fullfile exists
    n_media = ia_fulls[0]
    n_media.hash_id = None
    n_media.source_id = ia_fulls[0].source_id
    n_media.file_type = 'oc-gen:fullfile'
    n_media.file_uri = ia_fulls[0].file_uri
    n_media.save()
base_uri = media.file_uri.replace('/info.json', '')
for type in types:  # `types` is assumed to be defined earlier: a list of dicts with 'file_type' and 'suffix' keys
n_media = media
n_media.hash_id = None
n_media.source_id = source_id
n_media.file_type = type['file_type']
n_media.file_uri = base_uri + type['suffix']
n_media.save()
ia_fulls = Mediafile.objects.filter(uuid=media.uuid, file_type='oc-gen:ia-fullfile')[:1]
if ia_fulls:
n_media = media
n_media.hash_id = None
n_media.source_id = source_id
n_media.file_type = 'oc-gen:fullfile'
n_media.file_uri = ia_fulls[0].file_uri
n_media.save()
fixed_media.append(media.uuid)  # assumes a fixed_media list was initialized earlier in the session
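# Reindex the backfilled media items together with any subject items linked to them.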
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.mediafiles.models import Mediafile
from opencontext_py.apps.indexer.reindex import SolrReIndex
project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
source_id = 'pc-iiif-backfill'
fixed_media = []
medias = Mediafile.objects.filter(project_uuid=project_uuid, source_id=source_id)
for media in medias:
if media.uuid not in fixed_media:
fixed_media.append(media.uuid)
uuids = fixed_media
ass_o = Assertion.objects.filter(uuid__in=fixed_media, object_type='subjects')
for ass in ass_o:
if ass.object_uuid not in uuids:
uuids.append(ass.object_uuid)
ass_s = Assertion.objects.filter(object_uuid__in=fixed_media, subject_type='subjects')
for ass in ass_s:
    # for this query the linked subject is the assertion's uuid (its object is the media item)
    if ass.uuid not in uuids:
        uuids.append(ass.uuid)
print('Items to index: ' + str(len(uuids)))
sri = SolrReIndex()
sri.reindex_uuids(uuids)
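# Write a copy of the vesco trenches GeoJSON with the coordinates stripped out.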
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.source_id = 'vesco_trenches_2017_4326'
json_obj = gimp.load_json_file('pc-geo', 'vesco_trenches_2017_4326.geojson')
gimp.save_no_coord_file(json_obj, 'pc-geo', 'vesco_trenches_2017_4326.geojson')
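# Replace the project's old trench polygons: delete prior Polygon/Multipolygon records, then import
# the 2017 vesco trenches GeoJSON, tagging each polygon with its subject URI via vdm_props.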
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
from opencontext_py.apps.ocitems.geospace.models import Geospace
print('Delete old PC geospatial data')
Geospace.objects\
.filter(project_uuid='DF043419-F23B-41DA-7E4D-EE52AF22F92F',
ftype__in=['Polygon', 'Multipolygon']).delete()
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.source_id = 'vesco_trenches_2017_4326'
json_obj = gimp.load_json_file('pc-geo', 'vesco_trenches_2017_4326.geojson')
vdm_props = {
'1': {'uri': 'https://opencontext.org/subjects/4C242A96-3C0A-4187-48CD-6287241F09CD'},
'10': {'uri': 'https://opencontext.org/subjects/F6E97C59-EE6F-4824-863E-6596AA68BE2D'},
'11': {'uri': 'https://opencontext.org/subjects/F6E97C59-EE6F-4824-863E-6596AA68BE2D'},
'12': {'uri': 'https://opencontext.org/subjects/F6E97C59-EE6F-4824-863E-6596AA68BE2D'},
'13': {'uri': 'https://opencontext.org/subjects/F6E97C59-EE6F-4824-863E-6596AA68BE2D'},
'14': {'uri': 'https://opencontext.org/subjects/E252E83F-68D7-4671-85E3-70ED3A0A62B3'},
'15': {'uri': 'https://opencontext.org/subjects/8D6B6694-6E88-4D3F-9494-A9EE95C78B44'},
'16': {'uri': 'https://opencontext.org/subjects/8D6B6694-6E88-4D3F-9494-A9EE95C78B44'},
'17': {'uri': 'https://opencontext.org/subjects/6d37f225-f83a-4d6b-8e6a-0b138b29f236'},
'18': {'uri': 'https://opencontext.org/subjects/33e3d75f-7ba0-4d64-b36c-96daf288d06e'},
'19': {'uri': 'https://opencontext.org/subjects/ce22d11f-721a-4050-9576-f807a25ddefa'},
'2': {'uri': 'https://opencontext.org/subjects/25A27283-05AF-42E2-C839-3D8605EEC6BD'},
'20': {'uri': 'https://opencontext.org/subjects/c7049909-f2de-4b43-a9b4-d19a5c516532'},
'21': {'uri': 'https://opencontext.org/subjects/c7049909-f2de-4b43-a9b4-d19a5c516532'},
'22': {'uri': 'https://opencontext.org/subjects/608f3452-daf4-4e93-b953-3adb06c7a0cb'},
'23': {'uri': 'https://opencontext.org/subjects/5870f6a9-dbb0-425d-9c8b-2424a9fa060a'},
'24': {'uri': 'https://opencontext.org/subjects/bf9a4138-7c96-4c54-8553-004444eec143'},
'25': {'uri': 'https://opencontext.org/subjects/ad8357b1-b46c-4bfe-a221-25b403dcef0f'},
'26': {'uri': 'https://opencontext.org/subjects/244e8a86-c472-47e2-baaf-fcfe3f67a014'},
'27': {'uri': 'https://opencontext.org/subjects/7de5e185-77fb-4ff5-b73b-b47b870acae2'},
'28': {'uri': 'https://opencontext.org/subjects/d91c02df-bc3c-476a-a48e-6eb735397692'},
'3': {'uri': 'https://opencontext.org/subjects/25A27283-05AF-42E2-C839-3D8605EEC6BD'},
'4': {'uri': 'https://opencontext.org/subjects/25A27283-05AF-42E2-C839-3D8605EEC6BD'},
'5': {'uri': 'https://opencontext.org/subjects/25A27283-05AF-42E2-C839-3D8605EEC6BD'},
'6': {'uri': 'https://opencontext.org/subjects/25A27283-05AF-42E2-C839-3D8605EEC6BD'},
'7': {'uri': 'https://opencontext.org/subjects/25A27283-05AF-42E2-C839-3D8605EEC6BD'},
'8': {'uri': 'https://opencontext.org/subjects/F6E97C59-EE6F-4824-863E-6596AA68BE2D'},
'9': {'uri': 'https://opencontext.org/subjects/F6E97C59-EE6F-4824-863E-6596AA68BE2D'},
}
id_prop = 'PolygonID'
gimp.save_partial_clean_file(json_obj,
'pc-geo', 'vesco_trenches_2017_4326.geojson',
id_prop, ok_ids=False, add_props=vdm_props, combine_json_obj=None)
gimp.load_into_importer = False
gimp.process_features_in_file('pc-geo', 'id-clean-coord-vesco_trenches_2017_4326.geojson')
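# Same workflow for the main PC trenches GeoJSON, using the pc_props mapping of PolygonID to subject URI.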
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.source_id = 'pc_trenches_2017_4326'
pc_json_obj = gimp.load_json_file('pc-geo', 'pc_trenches_2017_4326.geojson')
pc_props = {
'1': {'uri': 'https://opencontext.org/subjects/17085BC0-4FA1-4236-6426-4861AD48B584'},
'10': {'uri': 'https://opencontext.org/subjects/87E9B5C3-0828-4F60-5F9A-DB48CCAB3CCA'},
'100': {'uri': 'https://opencontext.org/subjects/A386907E-C61D-4AC4-068D-77F3D2ADFA3E'},
'101': {'uri': 'https://opencontext.org/subjects/7E17DFBB-8F0F-4A19-3F74-F9268FBD6813'},
'102': {'uri': 'https://opencontext.org/subjects/EF628D6D-2A2D-4E6B-F86A-834A451F8296'},
'103': {'uri': 'https://opencontext.org/subjects/4B45D8C9-AF08-4518-1E80-321D2DBDB074'},
'104': {'uri': 'https://opencontext.org/subjects/6B8C2C81-B703-4D15-F23E-50A38DD4A387'},
'105': {'uri': 'https://opencontext.org/subjects/4BC62D10-69D8-49DB-2CA9-7C3DFD74E0C8'},
'106': {'uri': 'https://opencontext.org/subjects/8D224FCC-D368-4993-BC69-A4EDCBCA60D4'},
'107': {'uri': 'https://opencontext.org/subjects/AC6DFAF1-8E69-480D-A687-AFDA354FCFF5'},
'108': {'uri': 'https://opencontext.org/subjects/11E97C36-9CC5-4616-3C82-771E1EF62BD9'},
'109': {'uri': 'https://opencontext.org/subjects/AAB28E39-CEBE-455C-F495-CF7B408971FE'},
'11': {'uri': 'https://opencontext.org/subjects/500EC0EE-1078-4BE4-762D-3C66FCF853C2'},
'110': {'uri': 'https://opencontext.org/subjects/AAB28E39-CEBE-455C-F495-CF7B408971FE'},
'111': {'uri': 'https://opencontext.org/subjects/D68CE259-7E7B-4AE4-590A-4E5EA96265FC'},
'112': {'uri': 'https://opencontext.org/subjects/67905CDA-1882-4B23-F0D5-68C45FD0B862'},
'113': {'uri': 'https://opencontext.org/subjects/2F24BD3F-2E57-4645-E593-7BA1DB674321'},
'114': {'uri': 'https://opencontext.org/subjects/C334AB97-3C6F-4D01-BA1D-C245A73EDE34'},
'115': {'uri': 'https://opencontext.org/subjects/658B0F2F-0B65-4013-75E5-0056683D0AC8'},
'116': {'uri': 'https://opencontext.org/subjects/5376A5D0-B805-4CEE-490F-78A659777449'},
'117': {'uri': 'https://opencontext.org/subjects/C7EAC347-78B3-4FC2-16E2-561ACBA7D836'},
'118': {'uri': 'https://opencontext.org/subjects/A63E424C-A2AF-469B-A85D-AC1CC690B2A9'},
'119': {'uri': 'https://opencontext.org/subjects/6B98F59B-D05F-4B47-0761-E13EE798C019'},
'12': {'uri': 'https://opencontext.org/subjects/A5049327-BCCE-43B2-AA50-2FBEBD620BDF'},
'120': {'uri': 'https://opencontext.org/subjects/BF820055-F9BB-4C0B-F214-9495E60B7161'},
'121': {'uri': 'https://opencontext.org/subjects/0AC6B338-54BA-41FB-D5D8-59D8227AC31C'},
'122': {'uri': 'https://opencontext.org/subjects/30114327-F531-4D7D-2906-A91B80C2A595'},
'123': {'uri': 'https://opencontext.org/subjects/E89A8B35-2975-4E7A-4B07-5DC7E17F29E9'},
'124': {'uri': 'https://opencontext.org/subjects/DB8A88C9-CA9E-44DD-9E67-ACA26D5AE831'},
'125': {'uri': 'https://opencontext.org/subjects/38EB7FE3-D403-4515-D17D-ECCEE1472E3A'},
'126': {'uri': 'https://opencontext.org/subjects/14BE93AE-020D-42E2-24AD-604BE3C60F89'},
'127': {'uri': 'https://opencontext.org/subjects/9259D8F1-C05C-497F-07C4-E5803BA00709'},
'128': {'uri': 'https://opencontext.org/subjects/136A7CDF-13D8-4BAD-4E71-01CB93C4056F'},
'129': {'uri': 'https://opencontext.org/subjects/3B24EA00-CA9E-4762-52A2-5A170BCE1402'},
'13': {'uri': 'https://opencontext.org/subjects/60F9B8C2-7279-4BF5-9BD1-A0A1C3747017'},
'130': {'uri': 'https://opencontext.org/subjects/77830F25-F564-4094-19B9-7DB8ABA08945'},
'131': {'uri': 'https://opencontext.org/subjects/3427ED52-FEA9-4E28-E250-4ECF923A116A'},
'132': {'uri': 'https://opencontext.org/subjects/3E90BC2C-C826-4A12-6382-131B825BFA27'},
'133': {'uri': 'https://opencontext.org/subjects/B0F889B2-6D71-4022-F311-CAA9919A9ECC'},
'134': {'uri': 'https://opencontext.org/subjects/BCDC5D9E-BB8B-4159-F6B0-2A9474C123BD'},
'135': {'uri': 'https://opencontext.org/subjects/039766CC-521F-46B6-2B98-40210F7DA8F1'},
'136': {'uri': 'https://opencontext.org/subjects/56201674-F881-47CC-7000-B2C52FFE9E3A'},
'137': {'uri': 'https://opencontext.org/subjects/54252D07-A291-4B75-6C6E-AD4E831F564C'},
'138': {'uri': 'https://opencontext.org/subjects/54252D07-A291-4B75-6C6E-AD4E831F564C'},
'139': {'uri': 'https://opencontext.org/subjects/6621600B-3B5D-4B99-71F9-E42C18656E59'},
'14': {'uri': 'https://opencontext.org/subjects/3C36CD47-17FE-4390-AA8B-0BA9AF8C0A79'},
'140': {'uri': 'https://opencontext.org/subjects/4454F19D-C295-4E35-7124-98A6661CFF51'},
'141': {'uri': 'https://opencontext.org/subjects/646F3438-9BBF-46F1-71E9-0BEC2E095EA4'},
'142': {'uri': 'https://opencontext.org/subjects/847268D2-B10F-4CBB-FFEC-577630572C9C'},
'143': {'uri': 'https://opencontext.org/subjects/847268D2-B10F-4CBB-FFEC-577630572C9C'},
'144': {'uri': 'https://opencontext.org/subjects/847268D2-B10F-4CBB-FFEC-577630572C9C'},
'145': {'uri': 'https://opencontext.org/subjects/99CB9EB0-1EDA-412D-7FBD-04BCD5E44DCF'},
'146': {'uri': 'https://opencontext.org/subjects/C7823AAC-4E80-48B0-A6E6-B772C58873A4'},
'147': {'uri': 'https://opencontext.org/subjects/7C56C14F-9336-443D-4D1F-602299831244'},
'148': {'uri': 'https://opencontext.org/subjects/7C56C14F-9336-443D-4D1F-602299831244'},
'149': {'uri': 'https://opencontext.org/subjects/8A9B8A77-37BB-4BF8-DC57-D30EDB73C629'},
'15': {'uri': 'https://opencontext.org/subjects/A08A8F9B-DFAC-436C-6D6E-F08EDE4C40DB'},
'150': {'uri': 'https://opencontext.org/subjects/2FED46FB-BCF6-439B-B15E-307881DF5011'},
'151': {'uri': 'https://opencontext.org/subjects/4C9DE9EB-687B-41CD-E5DC-90CD3B55ADFA'},
'152': {'uri': 'https://opencontext.org/subjects/444151E3-24F0-421F-9BD9-6E29DE9F62C7'},
'153': {'uri': 'https://opencontext.org/subjects/444151E3-24F0-421F-9BD9-6E29DE9F62C7'},
'154': {'uri': 'https://opencontext.org/subjects/4AC3D7E6-8B05-480A-320E-B2AA1266759A'},
'155': {'uri': 'https://opencontext.org/subjects/78BB679E-20F6-499C-C274-16BB6A3CD53D'},
'156': {'uri': 'https://opencontext.org/subjects/B98E614B-DA64-41BE-1CE8-9E5D3496C96D'},
'157': {'uri': 'https://opencontext.org/subjects/A13A7840-04F0-492A-C8E3-C1EA488B006C'},
'158': {'uri': 'https://opencontext.org/subjects/F05B2B91-CCD4-4ED0-F2BF-F1E2DBEA354B'},
'159': {'uri': 'https://opencontext.org/subjects/F05B2B91-CCD4-4ED0-F2BF-F1E2DBEA354B'},
'16': {'uri': 'https://opencontext.org/subjects/BBEF5707-C16F-4BFD-932E-5D9B83F6D9C4'},
'160': {'uri': 'https://opencontext.org/subjects/E2E20765-3196-4443-E62C-8C7799708399'},
'161': {'uri': 'https://opencontext.org/subjects/04D907B0-A850-4129-03FB-CBABA554171F'},
'162': {'uri': 'https://opencontext.org/subjects/BE445D1C-6756-4B3B-4691-5D749A93F7FB'},
'163': {'uri': 'https://opencontext.org/subjects/81EDC6F0-153D-4881-3AE7-25E4A8E20806'},
'164': {'uri': 'https://opencontext.org/subjects/A3CDCB91-5B30-4305-EFF7-1014AB258A0E'},
'165': {'uri': 'https://opencontext.org/subjects/09E36D34-E5DA-432E-9367-057D3ECF26F6'},
'166': {'uri': 'https://opencontext.org/subjects/1486CEF9-FB7E-48A5-1468-306C069785BD'},
'167': {'uri': 'https://opencontext.org/subjects/4500CA4A-4B5A-41E8-5337-1C77990375AF'},
'168': {'uri': 'https://opencontext.org/subjects/A5E7AF56-BB36-4490-2EEB-9F9EA2AE1368'},
'169': {'uri': 'https://opencontext.org/subjects/0BD521E1-A442-48DC-C45F-4CA748597550'},
'17': {'uri': 'https://opencontext.org/subjects/476CA37D-D70D-443E-45AD-D58B57F9CCEF'},
'170': {'uri': 'https://opencontext.org/subjects/E25F27C3-CDFC-4D39-9B9A-A749A8A80F64'},
'171': {'uri': 'https://opencontext.org/subjects/E9298DF1-5F47-4BC6-7086-75B9B5E0E880'},
'172': {'uri': 'https://opencontext.org/subjects/8EF7AA6F-6CE3-4B45-B488-302E52BB9615'},
'173': {'uri': 'https://opencontext.org/subjects/36AA7F2A-F090-43A2-B5C8-A18B16276E6B'},
'174': {'uri': 'https://opencontext.org/subjects/9D983826-70DE-48F2-A145-B33A171E8928'},
'175': {'uri': 'https://opencontext.org/subjects/5C8BA00A-FD53-4D68-8BA8-8A4A17F4CBD6'},
'176': {'uri': 'https://opencontext.org/subjects/66EBB4F3-2E71-49D9-002D-EE6079231F29'},
'177': {'uri': 'https://opencontext.org/subjects/6EC9E1CA-1595-4B39-DE94-07669E96D51B'},
'178': {'uri': 'https://opencontext.org/subjects/5879C9ED-5FCF-4062-4477-FA5B44250143'},
'179': {'uri': 'https://opencontext.org/subjects/373B16B4-8E0F-4008-8D24-43685AA72064'},
'18': {'uri': 'https://opencontext.org/subjects/3D84308C-A4D6-4F47-263F-243596729D25'},
'180': {'uri': 'https://opencontext.org/subjects/905A30A8-A71D-44DF-3181-437816B64864'},
'181': {'uri': 'https://opencontext.org/subjects/09AE421A-F835-4C9B-F74E-45DD9F270E9D'},
'182': {'uri': 'https://opencontext.org/subjects/905A30A8-A71D-44DF-3181-437816B64864'},
'183': {'uri': 'https://opencontext.org/subjects/F143FA0B-93C8-435F-D34A-BBF4725708AF'},
'184': {'uri': 'https://opencontext.org/subjects/8E75346E-ECCB-4BF0-77C1-C33BCB95D18D'},
'185': {'uri': 'https://opencontext.org/subjects/CEFDF473-5B63-4B3F-B16F-C4E77E03F2BE'},
'186': {'uri': 'https://opencontext.org/subjects/1B0D3E3D-14BE-4951-990B-76835166281C'},
'187': {'uri': 'https://opencontext.org/subjects/A0E5EDD4-7388-450B-DF1C-68203EDE247A'},
'188': {'uri': 'https://opencontext.org/subjects/AC085185-FEDB-4A4B-D077-FCEAC65730D0'},
'189': {'uri': 'https://opencontext.org/subjects/5A9DE9D3-9A86-494F-04C9-487D17FA9D2C'},
'19': {'uri': 'https://opencontext.org/subjects/8FAC8645-1EA7-4C7B-60AD-5ECEEB808DC2'},
'190': {'uri': 'https://opencontext.org/subjects/E8BDB401-722D-45D4-DC6D-52A041BB1C35'},
'191': {'uri': 'https://opencontext.org/subjects/7251FBF7-631D-44C3-C0B6-5753A0E24659'},
'192': {'uri': 'https://opencontext.org/subjects/3AA9E720-0371-4443-5A40-5FE6A5E1DCAE'},
'193': {'uri': 'https://opencontext.org/subjects/E5E80BD3-81E5-4215-29E8-602D59A2B8D8'},
'194': {'uri': 'https://opencontext.org/subjects/51CA7A3E-5D5A-43F6-9B15-937DEC71AB63'},
'195': {'uri': 'https://opencontext.org/subjects/BA2D283E-9EC4-47AE-4B5E-3C933662B0FF'},
'196': {'uri': 'https://opencontext.org/subjects/041BC004-3A2B-4821-CC1A-F5FFC283E537'},
'197': {'uri': 'https://opencontext.org/subjects/6CA4122D-0AF2-4F2B-D8BF-C77584AD386D'},
'198': {'uri': 'https://opencontext.org/subjects/9FF8641D-C19B-4DFB-4DE9-65300B0AF24F'},
'199': {'uri': 'https://opencontext.org/subjects/EB236367-CCE3-4A53-EB80-351E2E598E90'},
'2': {'uri': 'https://opencontext.org/subjects/855F361E-68D8-4BBE-5895-B351F48E8233'},
'20': {'uri': 'https://opencontext.org/subjects/DFA0AB59-9398-46B7-3955-5DA0439DFED9'},
'200': {'uri': 'https://opencontext.org/subjects/3D75850B-7ECD-4675-B297-3FF2F5106D97'},
'201': {'uri': 'https://opencontext.org/subjects/3ECF9E3D-995B-4156-6AB7-87A8BA5BCA99'},
'202': {'uri': 'https://opencontext.org/subjects/3ECF9E3D-995B-4156-6AB7-87A8BA5BCA99'},
'203': {'uri': 'https://opencontext.org/subjects/AB508F4F-DF85-45D9-C8B1-3147603347E5'},
'204': {'uri': 'https://opencontext.org/subjects/AB508F4F-DF85-45D9-C8B1-3147603347E5'},
'205': {'uri': 'https://opencontext.org/subjects/171AA8D2-BC5F-4D17-F3B0-5A49DD4E3DB2'},
'206': {'uri': 'https://opencontext.org/subjects/0888EE5A-F13A-4E8B-F5FF-959DA485F55B'},
'207': {'uri': 'https://opencontext.org/subjects/DE5EFA76-4851-4AFC-DF09-467D249AB1B9'},
'208': {'uri': 'https://opencontext.org/subjects/41EC3704-2F60-4CA1-07C3-FBB954170F67'},
'209': {'uri': 'https://opencontext.org/subjects/E31ABF39-8A5C-4AC3-C0DE-ACCD8E741502'},
'21': {'uri': 'https://opencontext.org/subjects/EC80D676-F07E-4174-E781-8795D4BF599A'},
'210': {'uri': 'https://opencontext.org/subjects/42A76947-22B6-4AC1-C96B-46A543A98DFC'},
'211': {'uri': 'https://opencontext.org/subjects/8AC933DF-D0C3-479A-9D7F-1BD5854AC9C6'},
'212': {'uri': 'https://opencontext.org/subjects/8AC933DF-D0C3-479A-9D7F-1BD5854AC9C6'},
'213': {'uri': 'https://opencontext.org/subjects/BFA79728-C24A-431D-94A4-EA96FBCD266A'},
'214': {'uri': 'https://opencontext.org/subjects/1F718097-88C2-4292-3396-CDF0C832CDF8'},
'215': {'uri': 'https://opencontext.org/subjects/D5F8DC06-7FFD-4309-54BD-A690E1CEC570'},
'216': {'uri': 'https://opencontext.org/subjects/B9276437-4E36-4E08-2D09-7FAA0AE7B7D2'},
'217': {'uri': 'https://opencontext.org/subjects/B91809DA-A28D-438B-F735-636D941825E1'},
'218': {'uri': 'https://opencontext.org/subjects/53EB84D5-7033-4C0A-3473-1703AC111198'},
'219': {'uri': 'https://opencontext.org/subjects/F81F3432-2A5C-40E4-CE78-B40E4BE7A3F7'},
'22': {'uri': 'https://opencontext.org/subjects/7251FBF7-631D-44C3-C0B6-5753A0E24659'},
'220': {'uri': 'https://opencontext.org/subjects/97712850-B663-425C-54BD-914A6AD5AC95'},
'221': {'uri': 'https://opencontext.org/subjects/8A817258-0759-47E8-0B1E-FFAC21AD9D3E'},
'222': {'uri': 'https://opencontext.org/subjects/1427AB95-F567-4A47-630B-094E82651A49'},
'223': {'uri': 'https://opencontext.org/subjects/5E978F37-6FF0-4E05-7D7E-62B5AB2C918F'},
'224': {'uri': 'https://opencontext.org/subjects/D0825B7B-9B4F-49F4-A455-88D951C8FADE'},
'225': {'uri': 'https://opencontext.org/subjects/E56EEAAE-AE6D-49A5-B2AE-6ED1E4D38655'},
'226': {'uri': 'https://opencontext.org/subjects/32B61533-BE8C-481D-CFD9-31C7AA2DB5B0'},
'227': {'uri': 'https://opencontext.org/subjects/85401603-A817-48C1-E331-9C63BA143CC9'},
'228': {'uri': 'https://opencontext.org/subjects/A7F2CCBF-3A83-4AA0-2BD0-0E2E99900624'},
'229': {'uri': 'https://opencontext.org/subjects/19CFB9F3-77F7-4EDF-0B90-E9D6E76BCD77'},
'23': {'uri': 'https://opencontext.org/subjects/0F4B79C5-D28E-466D-1B6D-F16B9DDE4F1E'},
'230': {'uri': 'https://opencontext.org/subjects/7F063C00-F507-4EB3-A384-B487190459E4'},
'231': {'uri': 'https://opencontext.org/subjects/7F063C00-F507-4EB3-A384-B487190459E4'},
'232': {'uri': 'https://opencontext.org/subjects/302EA385-93D1-441E-617E-9B5D172EB9EA'},
'233': {'uri': 'https://opencontext.org/subjects/E06C5503-D300-4E11-9001-BA93C6F8FDAF'},
'234': {'uri': 'https://opencontext.org/subjects/93AB3DC0-E669-45E2-678F-4E41BF0664C4'},
'235': {'uri': 'https://opencontext.org/subjects/9736FA6A-3373-4429-50A3-1FB235E00649'},
'236': {'uri': 'https://opencontext.org/subjects/C004D54D-C176-4792-BE08-792D9195317A'},
'237': {'uri': 'https://opencontext.org/subjects/C004D54D-C176-4792-BE08-792D9195317A'},
'238': {'uri': 'https://opencontext.org/subjects/C004D54D-C176-4792-BE08-792D9195317A'},
'239': {'uri': 'https://opencontext.org/subjects/B663BA3E-CEF9-40FC-3546-4C5AC7122EFB'},
'24': {'uri': 'https://opencontext.org/subjects/6D6DA14B-CDE3-4A36-650D-A80A611C0689'},
'240': {'uri': 'https://opencontext.org/subjects/2B395B34-F1D9-4C59-EE39-97E3CE1831A2'},
'241': {'uri': 'https://opencontext.org/subjects/0AC6B338-54BA-41FB-D5D8-59D8227AC31C'},
'242': {'uri': 'https://opencontext.org/subjects/8161DF34-30A4-4B36-977F-ED68EA59CA5A'},
'243': {'uri': 'https://opencontext.org/subjects/6AE46285-0A75-4C91-0C8A-693540D22612'},
'244': {'uri': 'https://opencontext.org/subjects/6AE46285-0A75-4C91-0C8A-693540D22612'},
'245': {'uri': 'https://opencontext.org/subjects/6AE46285-0A75-4C91-0C8A-693540D22612'},
'246': {'uri': 'https://opencontext.org/subjects/1C8A3854-31C8-42DC-D4BB-254ECCE95B62'},
'247': {'uri': 'https://opencontext.org/subjects/1C8A3854-31C8-42DC-D4BB-254ECCE95B62'},
'248': {'uri': 'https://opencontext.org/subjects/17A80550-E9DA-4D5B-F69B-21A32B3C1458'},
'249': {'uri': 'https://opencontext.org/subjects/EF7C1C1F-7CDA-4D22-06D8-E0B6B1C6D9D8'},
'25': {'uri': 'https://opencontext.org/subjects/A6B061FA-C808-4282-D03E-8D80A26FF325'},
'250': {'uri': 'https://opencontext.org/subjects/E82D1F24-BFAD-49D7-7E19-007759D2E2C8'},
'251': {'uri': 'https://opencontext.org/subjects/E82D1F24-BFAD-49D7-7E19-007759D2E2C8'},
'252': {'uri': 'https://opencontext.org/subjects/4114E192-9D65-4123-2079-11BD3DD647E3'},
'253': {'uri': 'https://opencontext.org/subjects/5C7C5703-C751-4458-9611-3994A62AC974'},
'254': {'uri': 'https://opencontext.org/subjects/CCD880A0-1197-4B38-B82F-222C02D10C41'},
'255': {'uri': 'https://opencontext.org/subjects/211173B4-BA77-42E7-7A84-AFACA458D082'},
'256': {'uri': 'https://opencontext.org/subjects/F1DC6BC7-F794-4429-13FA-65D4D52475C5'},
'257': {'uri': 'https://opencontext.org/subjects/1B0D3E3D-14BE-4951-990B-76835166281C'},
'258': {'uri': 'https://opencontext.org/subjects/381CAEAA-235F-469D-160E-A03BB9B334F7'},
'259': {'uri': 'https://opencontext.org/subjects/18DD4EDD-CF3F-4829-A003-87AFA4ED6742'},
'26': {'uri': 'https://opencontext.org/subjects/3F1E1F75-3A5E-4105-1DD2-94C776DE493F'},
'260': {'uri': 'https://opencontext.org/subjects/97B3FB48-CF5E-4D97-6FC7-E6213BAE2CE7'},
'261': {'uri': 'https://opencontext.org/subjects/02E6EEFA-F1CE-4584-752D-44175C50626C'},
'262': {'uri': 'https://opencontext.org/subjects/9B6EB7FC-0506-4F28-22A5-D8C725E7C8A7'},
'263': {'uri': 'https://opencontext.org/subjects/4DAF1629-9562-4894-53BE-76AAA63E2BB3'},
'264': {'uri': 'https://opencontext.org/subjects/038DC277-D83A-492B-F5CE-AE13DCE3D50B'},
'265': {'uri': 'https://opencontext.org/subjects/403CDCF6-C5CF-4FC2-C66C-075F99EA91AD'},
'266': {'uri': 'https://opencontext.org/subjects/85BCD16C-705B-4CC5-C88A-52C280711A7B'},
'267': {'uri': 'https://opencontext.org/subjects/E651CBEF-B83D-4ED4-21FD-A8A9AE2C18BD'},
'268': {'uri': 'https://opencontext.org/subjects/8FD93D1C-264C-40B6-9A72-1F3EAB44C8B6'},
'269': {'uri': 'https://opencontext.org/subjects/2A1A12F4-F920-4A59-35C5-70D8D7FEC492'},
'27': {'uri': 'https://opencontext.org/subjects/644BB446-6A25-46E8-64C9-E8735E227E72'},
'270': {'uri': 'https://opencontext.org/subjects/9673D2A7-4426-412B-CDCB-1D74FAC4BD20'},
'271': {'uri': 'https://opencontext.org/subjects/3CD9CA1D-069B-4A65-3DFD-3D170AE8023C'},
'272': {'uri': 'https://opencontext.org/subjects/BE44A1B2-774B-4BB0-AFDF-4617996CFF76'},
'273': {'uri': 'https://opencontext.org/subjects/05CAB086-0E36-478C-F72C-22A15ABCFB19'},
'274': {'uri': 'https://opencontext.org/subjects/321D5BD9-70EE-4095-FD3E-C80B00CDEC78'},
'275': {'uri': 'https://opencontext.org/subjects/66213B47-0DE6-450B-10DF-D9143C5C8BC2'},
'276': {'uri': 'https://opencontext.org/subjects/034AA229-42A6-4B45-287E-E5C409981CF9'},
'277': {'uri': 'https://opencontext.org/subjects/83304A03-52E2-411F-4E36-DBEAC7A441D1'},
'278': {'uri': 'https://opencontext.org/subjects/F1360278-6001-43DF-F344-B11A536AF061'},
'279': {'uri': 'https://opencontext.org/subjects/E947308A-5F9A-41DD-E98A-D63BEA1CE1D9'},
'28': {'uri': 'https://opencontext.org/subjects/3F761187-D0A8-40CD-358E-2F7DB01DBA47'},
'280': {'uri': 'https://opencontext.org/subjects/EEB08529-F0BE-4311-53E7-1A4C8BCC200D'},
'281': {'uri': 'https://opencontext.org/subjects/938BCA2C-161D-4DC5-130B-C54C98EC8F82'},
'282': {'uri': 'https://opencontext.org/subjects/AFCE6C74-53DE-46B2-EF24-F4E1C5DAE3D6'},
'283': {'uri': 'https://opencontext.org/subjects/769885E1-C279-470D-5F7C-18AE42CB00AC'},
'284': {'uri': 'https://opencontext.org/subjects/C03933DF-64F7-4C78-75A3-980B41CB53D8'},
'285': {'uri': 'https://opencontext.org/subjects/7A31B205-C6D1-4D24-EBA4-D93D5732E28A'},
'286': {'uri': 'https://opencontext.org/subjects/10BFDD6B-7BD6-4714-F81F-2D7E52D243D6'},
'287': {'uri': 'https://opencontext.org/subjects/2042B811-D07C-4665-2CAB-6C9673E54127'},
'288': {'uri': 'https://opencontext.org/subjects/BA3576D9-DF16-4AE9-532A-841CCF20DF84'},
'289': {'uri': 'https://opencontext.org/subjects/00FABCF8-A821-4934-9A17-0C23DC2106B4'},
'29': {'uri': 'https://opencontext.org/subjects/03C05DB2-62DF-43B2-605B-56AEFA277051'},
'290': {'uri': 'https://opencontext.org/subjects/1107E1D5-DFAD-4D63-3DD2-1096AE61587D'},
'291': {'uri': 'https://opencontext.org/subjects/B19C4F26-ED8C-46BF-0A9A-2565484C9ED6'},
'292': {'uri': 'https://opencontext.org/subjects/8C7F1700-D3B1-408F-0BB7-D10B775F2B2E'},
'293': {'uri': 'https://opencontext.org/subjects/E84623B7-E3D6-4BC1-C5F7-4AD36E4D6E81'},
'294': {'uri': 'https://opencontext.org/subjects/F8D0AFB5-CC02-4756-E6A9-474692BE78C3'},
'295': {'uri': 'https://opencontext.org/subjects/F2E38A8D-1156-43CC-8EFC-875820D95FD4'},
'296': {'uri': 'https://opencontext.org/subjects/4D825459-8568-47A5-9A85-8510957AABB8'},
'297': {'uri': 'https://opencontext.org/subjects/9020295D-29DE-4709-1657-F077C4C2057E'},
'298': {'uri': 'https://opencontext.org/subjects/0D9788F5-D6FB-425C-D87E-252D6A1D2F55'},
'299': {'uri': 'https://opencontext.org/subjects/8FEC1F08-B91F-4F02-3065-42C80DFA828C'},
'3': {'uri': 'https://opencontext.org/subjects/BBDC297E-7AC1-4925-E775-6D1B75CC280C'},
'30': {'uri': 'https://opencontext.org/subjects/E2E20765-3196-4443-E62C-8C7799708399'},
'300': {'uri': 'https://opencontext.org/subjects/59109ADC-4DDB-4223-0BA1-E0C5593A20F7'},
'301': {'uri': 'https://opencontext.org/subjects/FD2FA15C-22AC-4585-360F-8FF50C98791F'},
'302': {'uri': 'https://opencontext.org/subjects/865868E6-B3F9-43D4-1EAF-B2FA1008C3A7'},
'303': {'uri': 'https://opencontext.org/subjects/FE6761C0-9514-48E9-CEAB-D8D7CD0A44F8'},
'304': {'uri': 'https://opencontext.org/subjects/4595251B-B67A-4132-443B-0000E2044C85'},
'305': {'uri': 'https://opencontext.org/subjects/FF55F04E-E14E-4230-7155-6DBCDAAFAB19'},
'306': {'uri': 'https://opencontext.org/subjects/21EE7A84-703B-49DA-60FB-D7FAFA83FD6E'},
'307': {'uri': 'https://opencontext.org/subjects/1F380C32-EDA5-4F74-59CF-05BC44055468'},
'308': {'uri': 'https://opencontext.org/subjects/A03E5EA3-CC6F-4BAE-3FA0-D7E8931ADCF5'},
'309': {'uri': 'https://opencontext.org/subjects/1E07D087-DE44-4B52-0C69-21481B1E6816'},
'31': {'uri': 'https://opencontext.org/subjects/5B5550EC-78D2-4766-2E71-D54E3A4AAC6A'},
'310': {'uri': 'https://opencontext.org/subjects/F17D401C-A890-49B3-972F-C516DA9DFC8D'},
'311': {'uri': 'https://opencontext.org/subjects/E5E80BD3-81E5-4215-29E8-602D59A2B8D8'},
'312': {'uri': 'https://opencontext.org/subjects/6F340337-95B3-48E1-D09B-D5D7FF9700E0'},
'313': {'uri': 'https://opencontext.org/subjects/24878B9F-E641-49A6-E7BE-F536303A96BD'},
'314': {'uri': 'https://opencontext.org/subjects/BAC52ED6-040A-4E6B-95CF-3FE40592A951'},
'315': {'uri': 'https://opencontext.org/subjects/E72D3E97-C3FD-4C21-8B4D-6F153221E59F'},
'316': {'uri': 'https://opencontext.org/subjects/9879A755-C08A-498B-3DA2-283E5D8A4346'},
'317': {'uri': 'https://opencontext.org/subjects/FDD41CEF-BCE8-4EF1-53AA-A702F71D081E'},
'318': {'uri': 'https://opencontext.org/subjects/8B1452F5-ABF0-4F10-9196-9964B1E10C8F'},
'319': {'uri': 'https://opencontext.org/subjects/13781DFA-0D2C-41FC-8D12-EF779F960074'},
'32': {'uri': 'https://opencontext.org/subjects/7A2631C5-ABBD-46F6-6A6B-0C23EA819370'},
'320': {'uri': 'https://opencontext.org/subjects/BA4DD0AC-D9FC-43AE-B343-7A100244FCE0'},
'321': {'uri': 'https://opencontext.org/subjects/BA4DD0AC-D9FC-43AE-B343-7A100244FCE0'},
'322': {'uri': 'https://opencontext.org/subjects/BA4DD0AC-D9FC-43AE-B343-7A100244FCE0'},
'323': {'uri': 'https://opencontext.org/subjects/C1EFFCC9-3FBD-46C0-2160-38A6273DFDEB'},
'324': {'uri': 'https://opencontext.org/subjects/D1385646-A2B3-4A24-35A8-E7FE4D2B9E8E'},
'325': {'uri': 'https://opencontext.org/subjects/C1769D8F-A55C-4E1A-4398-8CDC1524A589'},
'326': {'uri': 'https://opencontext.org/subjects/F1C8EDE7-B1AB-41FE-998F-820C5F85074C'},
'327': {'uri': 'https://opencontext.org/subjects/DDF93AE8-2CB7-4FC8-0D49-A204D6D19451'},
'328': {'uri': 'https://opencontext.org/subjects/C4E13506-CCFB-4538-8307-412EAB609CAB'},
'329': {'uri': 'https://opencontext.org/subjects/AC23FDFA-A661-4EAF-5B68-64409F2DEE3C'},
'33': {'uri': 'https://opencontext.org/subjects/3A6C5062-5C6A-4886-87CD-DD15B4636809'},
'330': {'uri': 'https://opencontext.org/subjects/1715F935-44F4-46B0-458A-CA73117B66F7'},
'331': {'uri': 'https://opencontext.org/subjects/3B1B5BDC-9CEA-4F21-46CF-A1698373CEDE'},
'332': {'uri': 'https://opencontext.org/subjects/C6841408-C4D0-4F1F-B163-0F093BD2EC6F'},
'333': {'uri': 'https://opencontext.org/subjects/44D03EAE-759A-4C90-0B20-CAB6BC8E010F'},
'334': {'uri': 'https://opencontext.org/subjects/44D03EAE-759A-4C90-0B20-CAB6BC8E010F'},
'335': {'uri': 'https://opencontext.org/subjects/88085F60-2240-4894-166D-11233971C5CA'},
'336': {'uri': 'https://opencontext.org/subjects/88085F60-2240-4894-166D-11233971C5CA'},
'337': {'uri': 'https://opencontext.org/subjects/9F5548EF-C05A-471A-2531-99D056C0392E'},
'338': {'uri': 'https://opencontext.org/subjects/9F5548EF-C05A-471A-2531-99D056C0392E'},
'339': {'uri': 'https://opencontext.org/subjects/8C6D0960-E44D-4C77-EB65-B1BF15B23C1C'},
'34': {'uri': 'https://opencontext.org/subjects/E7750CB4-79E0-423C-6F71-4B991EB487DD'},
'340': {'uri': 'https://opencontext.org/subjects/CF45634D-C744-4FBE-CB30-DE94362392F5'},
'341': {'uri': 'https://opencontext.org/subjects/BB5078E0-E3D7-4876-7115-AB3C1730E483'},
'342': {'uri': 'https://opencontext.org/subjects/B79DEFA4-EB3A-4E9D-DB25-711792610C21'},
'343': {'uri': 'https://opencontext.org/subjects/1F515D40-4915-4B7C-A644-07ADDD89ACD4'},
'344': {'uri': 'https://opencontext.org/subjects/9F5D7919-2266-4617-40B9-45CAFA9B178E'},
'345': {'uri': 'https://opencontext.org/subjects/82001ECC-766E-4B9F-2FB0-AF5F30722583'},
'346': {'uri': 'https://opencontext.org/subjects/368588E0-9A4F-4E00-2CF8-C4DA561557BF'},
'347': {'uri': 'https://opencontext.org/subjects/7C536C68-D9B1-4B34-01E9-B8187E757580'},
'348': {'uri': 'https://opencontext.org/subjects/B07D574D-336E-4FC5-B9BE-59136145193D'},
'349': {'uri': 'https://opencontext.org/subjects/7DD8061F-FCC1-4E69-C8D6-50FCB1CBF2A0'},
'35': {'uri': 'https://opencontext.org/subjects/DA41F536-AD71-413E-048C-C572046069B4'},
'350': {'uri': 'https://opencontext.org/subjects/A9F9AD0A-3A54-49D5-21B1-2CB331F82966'},
'351': {'uri': 'https://opencontext.org/subjects/BF12F33F-C18C-4F84-0470-B64F86679044'},
'352': {'uri': 'https://opencontext.org/subjects/BF12F33F-C18C-4F84-0470-B64F86679044'},
'353': {'uri': 'https://opencontext.org/subjects/51EE0E7F-DC67-4911-2E53-57AF8F5068B9'},
'354': {'uri': 'https://opencontext.org/subjects/49F45331-EB08-4E19-FA31-4C9AC639C388'},
'355': {'uri': 'https://opencontext.org/subjects/8DC1810D-17C9-4064-D1AC-526CE4FB1B69'},
'356': {'uri': 'https://opencontext.org/subjects/51811248-1469-45CE-EA1C-33B61A244055'},
'357': {'uri': 'https://opencontext.org/subjects/E9425F5E-9AEC-43F0-E0A9-2D64F97F5D2A'},
'358': {'uri': 'https://opencontext.org/subjects/122F81BF-29C7-4B62-0E43-E68059D21B8C'},
'359': {'uri': 'https://opencontext.org/subjects/171AA8D2-BC5F-4D17-F3B0-5A49DD4E3DB2'},
'36': {'uri': 'https://opencontext.org/subjects/0D1E2474-6E46-4348-AD85-800AB9F3D80D'},
'360': {'uri': 'https://opencontext.org/subjects/83E9F7D3-4B39-4734-476B-F64E3AB4D584'},
'361': {'uri': 'https://opencontext.org/subjects/ED527E93-E63F-4FA6-A9A1-407F9EC2F3C1'},
'362': {'uri': 'https://opencontext.org/subjects/68F088F1-A2C1-4072-A9AB-EE6001F30CF8'},
'363': {'uri': 'https://opencontext.org/subjects/388BB97A-C9B0-4EBB-FA5D-2A7B57460547'},
'364': {'uri': 'https://opencontext.org/subjects/905B4EA0-91A8-4B10-0020-7841C86953B0'},
'365': {'uri': 'https://opencontext.org/subjects/4B93CE0F-AD3C-45E2-A260-C52F71792F3A'},
'366': {'uri': 'https://opencontext.org/subjects/CA2126B2-51B5-43A6-FBE2-CEAF068934A7'},
'367': {'uri': 'https://opencontext.org/subjects/3AFAAAB3-24C6-4F5D-D993-DA4006935924'},
'368': {'uri': 'https://opencontext.org/subjects/BA2D283E-9EC4-47AE-4B5E-3C933662B0FF'},
'369': {'uri': 'https://opencontext.org/subjects/B1AAE78E-ACD4-4B1F-CA46-EF64F7310195'},
'37': {'uri': 'https://opencontext.org/subjects/10AEE2E8-FFB6-4CD7-A7E4-8434BD2A7287'},
'370': {'uri': 'https://opencontext.org/subjects/6E9A747F-A5CA-40B6-2E21-61EB5CF4E6CA'},
'371': {'uri': 'https://opencontext.org/subjects/f9d2cc31-9892-49d5-a0f8-4e6fb084833e'},
'372': {'uri': 'https://opencontext.org/subjects/00E9665A-780D-4390-CFBF-B48A970759CB'},
'373': {'uri': 'https://opencontext.org/subjects/2196C174-D15B-428D-BB25-B375B2CE1AA6'},
'374': {'uri': 'https://opencontext.org/subjects/0C5F1A52-D6F4-456B-6041-4604F7C54855'},
'375': {'uri': 'https://opencontext.org/subjects/cd3e05e0-cd2d-42f4-936b-4399af5c66d2'},
'376': {'uri': 'https://opencontext.org/subjects/525E6271-B403-495D-0956-A316F186DC65'},
'377': {'uri': 'https://opencontext.org/subjects/ecba8803-74d2-40ac-a63a-54ee3dce5d2c'},
'378': {'uri': 'https://opencontext.org/subjects/cf217633-6fba-4002-8dec-472a8416f1dd'},
'379': {'uri': 'https://opencontext.org/subjects/45C6D049-10DD-4FFF-78F4-08D6D32642FC'},
'38': {'uri': 'https://opencontext.org/subjects/282A6231-1FDA-4DD2-9E9B-BA5F25BC7E37'},
'380': {'uri': 'https://opencontext.org/subjects/0C5F1A52-D6F4-456B-6041-4604F7C54855'},
'381': {'uri': 'https://opencontext.org/subjects/254dc26d-0431-4e75-8c50-ab6016c0df7e'},
'382': {'uri': 'https://opencontext.org/subjects/78ca532e-d943-482a-bb88-3fc43862bdda'},
'383': {'uri': 'https://opencontext.org/subjects/5071035d-3771-48d5-89fd-f59c01f96e74'},
'384': {'uri': 'https://opencontext.org/subjects/5AD193FF-803A-4D25-273B-F874B29AF6EC'},
'385': {'uri': 'https://opencontext.org/subjects/1a6306d7-a0f3-4b45-9667-6854f9e21100'},
'386': {'uri': 'https://opencontext.org/subjects/23e7aafa-37d8-4e72-b92e-5e94fedfd207'},
'387': {'uri': 'https://opencontext.org/subjects/2f2d6d6e-a85e-4876-af9d-01e83668f5e9'},
'388': {'uri': 'https://opencontext.org/subjects/80c59cb5-d6ca-4b88-8e93-cab9c1e3798f'},
'389': {'uri': 'https://opencontext.org/subjects/4ba347eb-5401-407a-a65b-7206754259ad'},
'39': {'uri': 'https://opencontext.org/subjects/ECCC7A10-11C7-43DA-74E6-726210980A54'},
'390': {'uri': 'https://opencontext.org/subjects/4acaaff3-cab7-4391-940b-6f6cb9946186'},
'391': {'uri': 'https://opencontext.org/subjects/e7100971-7904-4848-9215-47d12309f089'},
'392': {'uri': 'https://opencontext.org/subjects/eccf1e45-88e2-4072-98a5-d04eaa7c3b70'},
'393': {'uri': 'https://opencontext.org/subjects/2189b8ab-a4a5-490a-83dd-3b9c2e2385d2'},
'394': {'uri': 'https://opencontext.org/subjects/254dc26d-0431-4e75-8c50-ab6016c0df7e'},
'395': {'uri': 'https://opencontext.org/subjects/2ab489e7-014b-4437-aeca-c243e019695a'},
'396': {'uri': 'https://opencontext.org/subjects/cc79062b-1b2c-417b-9295-299e89d26c97'},
'397': {'uri': 'https://opencontext.org/subjects/5071035d-3771-48d5-89fd-f59c01f96e74'},
'398': {'uri': 'https://opencontext.org/subjects/17924BE4-6A53-47C1-0E6A-D9C8F840F2D9'},
'399': {'uri': 'https://opencontext.org/subjects/61bd5ada-45dd-4cf3-b620-8ba839f85753'},
'4': {'uri': 'https://opencontext.org/subjects/D5464A7B-A7B0-4D11-368C-ECF47D1B9389'},
'40': {'uri': 'https://opencontext.org/subjects/736422CB-0786-43EC-C243-85E7CB2C7315'},
'400': {'uri': 'https://opencontext.org/subjects/61bd5ada-45dd-4cf3-b620-8ba839f85753'},
'401': {'uri': 'https://opencontext.org/subjects/61bd5ada-45dd-4cf3-b620-8ba839f85753'},
'402': {'uri': 'https://opencontext.org/subjects/9420190e-3d76-44cf-ae0f-279900c4a56c'},
'403': {'uri': 'https://opencontext.org/subjects/70a10d8f-5bec-46ab-aa02-6453e66e167e'},
'404': {'uri': 'https://opencontext.org/subjects/9420190e-3d76-44cf-ae0f-279900c4a56c'},
'405': {'uri': 'https://opencontext.org/subjects/f675c9ad-e4b3-4125-937e-21d64ed8fd30'},
'406': {'uri': 'https://opencontext.org/subjects/f675c9ad-e4b3-4125-937e-21d64ed8fd30'},
'407': {'uri': 'https://opencontext.org/subjects/bfc2ff8b-1d8a-49ba-bb5e-e64ae799a642'},
'408': {'uri': 'https://opencontext.org/subjects/bfc2ff8b-1d8a-49ba-bb5e-e64ae799a642'},
'409': {'uri': 'https://opencontext.org/subjects/bfc2ff8b-1d8a-49ba-bb5e-e64ae799a642'},
'41': {'uri': 'https://opencontext.org/subjects/67B642B7-C0AC-4ED7-FD60-4DC7E54861C5'},
'410': {'uri': 'https://opencontext.org/subjects/1f5549bf-2d1e-4eab-aa1c-a23e76cff1d4'},
'411': {'uri': 'https://opencontext.org/subjects/1f5549bf-2d1e-4eab-aa1c-a23e76cff1d4'},
'412': {'uri': 'https://opencontext.org/subjects/753ff890-7e5c-4854-93e8-0c1d40e1683a'},
'413': {'uri': 'https://opencontext.org/subjects/311c66b1-75d5-49c1-9593-481228048c07'},
'414': {'uri': 'https://opencontext.org/subjects/311c66b1-75d5-49c1-9593-481228048c07'},
'415': {'uri': 'https://opencontext.org/subjects/6ffcb588-d41e-4749-86db-351bcfba50c4'},
'417': {'uri': 'https://opencontext.org/subjects/7b01d4eb-7821-419e-9df4-d72d06513a6d'},
'418': {'uri': 'https://opencontext.org/subjects/7b01d4eb-7821-419e-9df4-d72d06513a6d'},
'419': {'uri': 'https://opencontext.org/subjects/503bb312-f0cc-4ecf-9443-66a81f9dc683'},
'42': {'uri': 'https://opencontext.org/subjects/03C2F519-5258-4880-C762-42673862AD9E'},
'420': {'uri': 'https://opencontext.org/subjects/dc245337-3117-4551-837e-42c02d297a2e'},
'421': {'uri': 'https://opencontext.org/subjects/dc245337-3117-4551-837e-42c02d297a2e'},
'422': {'uri': 'https://opencontext.org/subjects/dc245337-3117-4551-837e-42c02d297a2e'},
'423': {'uri': 'https://opencontext.org/subjects/28a6cc00-fbab-40f1-90e6-01c66376926b'},
'424': {'uri': 'https://opencontext.org/subjects/28a6cc00-fbab-40f1-90e6-01c66376926b'},
'425': {'uri': 'https://opencontext.org/subjects/284a39ef-3e9f-4dac-8dc9-87b94095f382'},
'426': {'uri': 'https://opencontext.org/subjects/284a39ef-3e9f-4dac-8dc9-87b94095f382'},
'427': {'uri': 'https://opencontext.org/subjects/284a39ef-3e9f-4dac-8dc9-87b94095f382'},
'428': {'uri': 'https://opencontext.org/subjects/d0ec09bd-399d-46f0-8ec9-8f5936044f1c'},
'429': {'uri': 'https://opencontext.org/subjects/db6cca7f-78cb-4043-adb4-c3de5d791910'},
'43': {'uri': 'https://opencontext.org/subjects/751B3F35-088B-46B2-71E7-A555D058491D'},
'430': {'uri': 'https://opencontext.org/subjects/a236e90a-f7b6-44b9-af1b-dc259071ab4f'},
'431': {'uri': 'https://opencontext.org/subjects/0a00d502-984f-4449-8bdc-8245093690f8'},
'432': {'uri': 'https://opencontext.org/subjects/5ccfc2a2-4139-41fd-9961-fdc03420e0bf'},
'433': {'uri': 'https://opencontext.org/subjects/3bd49b02-b954-46ad-8cc6-a0fa4ca48906'},
'434': {'uri': 'https://opencontext.org/subjects/3bd49b02-b954-46ad-8cc6-a0fa4ca48906'},
'435': {'uri': 'https://opencontext.org/subjects/991fd662-e8c0-4dfd-9b1f-d7233429ca3a'},
'436': {'uri': 'https://opencontext.org/subjects/ea552ea2-e2aa-4db3-9372-2377958369c4'},
'437': {'uri': 'https://opencontext.org/subjects/c8f7fff7-4681-4a40-a049-0f15e1a3602e'},
'438': {'uri': 'https://opencontext.org/subjects/7d0c9b3e-1d2a-4c6a-978a-edfa6e40fd2b'},
'439': {'uri': 'https://opencontext.org/subjects/7d0c9b3e-1d2a-4c6a-978a-edfa6e40fd2b'},
'44': {'uri': 'https://opencontext.org/subjects/5E429FD0-C205-4EB7-7E05-32D4B588E5E1'},
'440': {'uri': 'https://opencontext.org/subjects/b8233443-f716-4cfe-9b52-fe46a81eb610'},
'441': {'uri': 'https://opencontext.org/subjects/b8233443-f716-4cfe-9b52-fe46a81eb610'},
'442': {'uri': 'https://opencontext.org/subjects/ea552ea2-e2aa-4db3-9372-2377958369c4'},
'443': {'uri': 'https://opencontext.org/subjects/991fd662-e8c0-4dfd-9b1f-d7233429ca3a'},
'444': {'uri': 'https://opencontext.org/subjects/5ccfc2a2-4139-41fd-9961-fdc03420e0bf'},
'445': {'uri': 'https://opencontext.org/subjects/5ccfc2a2-4139-41fd-9961-fdc03420e0bf'},
'446': {'uri': 'https://opencontext.org/subjects/9420190e-3d76-44cf-ae0f-279900c4a56c'},
'447': {'uri': 'https://opencontext.org/subjects/7a32b56f-a7e0-4f0b-87cb-2126ccdee127'},
'448': {'uri': 'https://opencontext.org/subjects/910004b2-e13c-4084-9f66-04960123d1fe'},
'449': {'uri': 'https://opencontext.org/subjects/45760b29-e1ac-4a1e-9c11-71d071edea48'},
'45': {'uri': 'https://opencontext.org/subjects/85C5376B-08B0-429A-F106-D9386FA457B3'},
'450': {'uri': 'https://opencontext.org/subjects/45760b29-e1ac-4a1e-9c11-71d071edea48'},
'451': {'uri': 'https://opencontext.org/subjects/672bd5a3-ea71-4555-aa78-a7ce17bf51f8'},
'452': {'uri': 'https://opencontext.org/subjects/672bd5a3-ea71-4555-aa78-a7ce17bf51f8'},
'453': {'uri': 'https://opencontext.org/subjects/c766cfd8-e1b4-4a66-b6b2-08b2030bdcb8'},
'454': {'uri': 'https://opencontext.org/subjects/c766cfd8-e1b4-4a66-b6b2-08b2030bdcb8'},
'455': {'uri': 'https://opencontext.org/subjects/70a10d8f-5bec-46ab-aa02-6453e66e167e'},
'456': {'uri': 'https://opencontext.org/subjects/2d29ce46-1637-4f2e-9841-264d6eb14f9e'},
'457': {'uri': 'https://opencontext.org/subjects/511415d9-622c-4958-9445-44d509dacd04'},
'458': {'uri': 'https://opencontext.org/subjects/6f59d5c0-fa4f-4adc-a98b-6dca8ccb8d8e'},
'459': {'uri': 'https://opencontext.org/subjects/6ec338a9-36a4-4643-9d16-e01fc33c585f'},
'46': {'uri': 'https://opencontext.org/subjects/FEB576A3-12DE-4AE2-495D-87E662F23FF5'},
'460': {'uri': 'https://opencontext.org/subjects/f2661425-9eb5-45a2-ae01-d7bc63683db6'},
'461': {'uri': 'https://opencontext.org/subjects/1341c275-af80-4e8c-a9f4-1682efe5f022'},
'462': {'uri': 'https://opencontext.org/subjects/f7b150a8-7650-4d2b-a9b3-91168537a75d'},
'463': {'uri': 'https://opencontext.org/subjects/1b6f73ed-6173-4f3f-938e-a5e5b3402fd4'},
'464': {'uri': 'https://opencontext.org/subjects/ed8d8636-fd23-408a-86dc-b72ac2da068a'},
'465': {'uri': 'https://opencontext.org/subjects/4b503ad5-d1fd-486b-89d0-2b5c0d37e5ff'},
'466': {'uri': 'https://opencontext.org/subjects/03C2F519-5258-4880-C762-42673862AD9E'},
'467': {'uri': 'https://opencontext.org/subjects/8605e112-6af4-4b29-a50e-4ce4bb45801b'},
'468': {'uri': 'https://opencontext.org/subjects/2d73d4bc-bc05-4242-9896-b8cf3ca96346'},
'469': {'uri': 'https://opencontext.org/subjects/3f7dc67b-9640-4e09-8c63-42640b4b23c2'},
'47': {'uri': 'https://opencontext.org/subjects/5D60BE1B-57A7-4F37-7CA1-11E57A73FB31'},
'470': {'uri': 'https://opencontext.org/subjects/e6b4620a-c575-411a-b136-b4110755ac6d'},
'471': {'uri': 'https://opencontext.org/subjects/3689eb5d-e9a2-49e4-9be5-c9676080ec2b'},
'472': {'uri': 'https://opencontext.org/subjects/221f94cc-1031-400e-a583-2e9b2bb18104'},
'473': {'uri': 'https://opencontext.org/subjects/85b795fd-d5ef-406e-bce7-0a695b656fd6'},
'48': {'uri': 'https://opencontext.org/subjects/C0C7B0E4-6135-4A2D-5E6B-1285190F271F'},
'49': {'uri': 'https://opencontext.org/subjects/78DC8FFD-D170-4FF7-1D02-EA0C85A4539C'},
'5': {'uri': 'https://opencontext.org/subjects/9950174E-E880-4BEF-B797-358A82F4A372'},
'50': {'uri': 'https://opencontext.org/subjects/D5FF94F3-1D3C-4107-30D5-697C86409234'},
'51': {'uri': 'https://opencontext.org/subjects/D5FF94F3-1D3C-4107-30D5-697C86409234'},
'52': {'uri': 'https://opencontext.org/subjects/18DD4EDD-CF3F-4829-A003-87AFA4ED6742'},
'53': {'uri': 'https://opencontext.org/subjects/692CF461-0052-4019-BB8A-BB26D84E5F24'},
'54': {'uri': 'https://opencontext.org/subjects/3AA9E720-0371-4443-5A40-5FE6A5E1DCAE'},
'55': {'uri': 'https://opencontext.org/subjects/F5332A70-73BE-4E87-AD91-08F0ABF7BFB1'},
'56': {'uri': 'https://opencontext.org/subjects/3E140B66-8309-4931-FEF4-AD2FDF8D6291'},
'57': {'uri': 'https://opencontext.org/subjects/E94E6618-A7FC-4AFF-D004-69EEB3E8089A'},
'58': {'uri': 'https://opencontext.org/subjects/58FCC816-FF23-4890-06F6-DB80719BFF31'},
'59': {'uri': 'https://opencontext.org/subjects/9D2FD065-FA0F-4DA3-21FE-A2D3CA02E320'},
'6': {'uri': 'https://opencontext.org/subjects/CFE33CD1-B119-4D65-E2FF-8DAFABEAC941'},
'60': {'uri': 'https://opencontext.org/subjects/E405152C-1036-49AE-0A8B-A621B66EC7AB'},
'61': {'uri': 'https://opencontext.org/subjects/98BC67D5-770B-4C37-3950-A33170EB3A4F'},
'62': {'uri': 'https://opencontext.org/subjects/7CF7D7EB-D71C-45DC-82EA-921CDE5640B1'},
'63': {'uri': 'https://opencontext.org/subjects/8EA5B6D8-EC94-4B56-4D7B-440C67EFAFA3'},
'64': {'uri': 'https://opencontext.org/subjects/8EA5B6D8-EC94-4B56-4D7B-440C67EFAFA3'},
'65': {'uri': 'https://opencontext.org/subjects/8EA5B6D8-EC94-4B56-4D7B-440C67EFAFA3'},
'66': {'uri': 'https://opencontext.org/subjects/8EA5B6D8-EC94-4B56-4D7B-440C67EFAFA3'},
'67': {'uri': 'https://opencontext.org/subjects/8EA5B6D8-EC94-4B56-4D7B-440C67EFAFA3'},
'68': {'uri': 'https://opencontext.org/subjects/8EA5B6D8-EC94-4B56-4D7B-440C67EFAFA3'},
'69': {'uri': 'https://opencontext.org/subjects/9B688358-00F8-4091-E5D2-B2E0AB25ADBC'},
'7': {'uri': 'https://opencontext.org/subjects/CCAA1707-E736-4CC6-D2F7-F6DE62998FD4'},
'70': {'uri': 'https://opencontext.org/subjects/F6B4B592-664A-47A0-A5B8-F77EF4A48149'},
'71': {'uri': 'https://opencontext.org/subjects/4B85886B-03E4-48FE-41AB-D55457C5E3A9'},
'72': {'uri': 'https://opencontext.org/subjects/4B85886B-03E4-48FE-41AB-D55457C5E3A9'},
'73': {'uri': 'https://opencontext.org/subjects/4B85886B-03E4-48FE-41AB-D55457C5E3A9'},
'74': {'uri': 'https://opencontext.org/subjects/8F23631E-6201-4092-E5BE-5E7D536E2B35'},
'75': {'uri': 'https://opencontext.org/subjects/234F8218-4D67-4887-659E-D14785BC0A20'},
'76': {'uri': 'https://opencontext.org/subjects/1CD4609F-F5E5-4C2C-3DF7-CDCB87CB019E'},
'77': {'uri': 'https://opencontext.org/subjects/D1359970-4965-4819-DC03-61A7A7E4A850'},
'78': {'uri': 'https://opencontext.org/subjects/2680D449-6312-48AE-EF0F-DB25B67F7BFC'},
'79': {'uri': 'https://opencontext.org/subjects/C7E131A3-8BE2-40E3-0EF2-C3AB65823B63'},
'8': {'uri': 'https://opencontext.org/subjects/7940B16C-1C48-457D-937B-98EBB5CF4980'},
'80': {'uri': 'https://opencontext.org/subjects/7655E594-69C9-4438-1BED-5987F671D933'},
'81': {'uri': 'https://opencontext.org/subjects/6A9CF999-C6FA-42AA-8F2B-B07442FE2D44'},
'82': {'uri': 'https://opencontext.org/subjects/337FE71E-94CD-4FB5-872F-B1678343B812'},
'83': {'uri': 'https://opencontext.org/subjects/AB14B22E-C5AC-42E1-2BD3-0D7362D6DC59'},
'84': {'uri': 'https://opencontext.org/subjects/04734348-D45B-4957-BA5A-4BB8D9690140'},
'85': {'uri': 'https://opencontext.org/subjects/99F94829-B4F3-44CE-8A9B-A8236BECFBB4'},
'86': {'uri': 'https://opencontext.org/subjects/34A37E52-6950-4AE8-FCA8-3DE56973B33E'},
'87': {'uri': 'https://opencontext.org/subjects/8188FFAA-CFAD-47D6-2A12-23E4FF5DDBFB'},
'88': {'uri': 'https://opencontext.org/subjects/A66D8905-2236-4D2A-3151-F0C49F59B660'},
'89': {'uri': 'https://opencontext.org/subjects/150BD521-2E06-48C2-DB8A-B4F53AC3D321'},
'9': {'uri': 'https://opencontext.org/subjects/C26457DA-4B00-484E-F27E-85F2A2E88314'},
'90': {'uri': 'https://opencontext.org/subjects/8E1306DF-0CF7-409F-403A-A7B74C3B6BDE'},
'91': {'uri': 'https://opencontext.org/subjects/7BB80569-AEBF-4C8A-6F9F-581BAF999DE0'},
'92': {'uri': 'https://opencontext.org/subjects/7FC5291E-A036-4100-981B-3EE7DECCE097'},
'93': {'uri': 'https://opencontext.org/subjects/2D8BB7CF-18F6-464B-213A-ADA01818C70D'},
'94': {'uri': 'https://opencontext.org/subjects/E7C6B89A-0258-4A50-1487-C55AE8C4ED69'},
'95': {'uri': 'https://opencontext.org/subjects/B229A46D-ED3D-41A0-D64F-2E5855703E0B'},
'96': {'uri': 'https://opencontext.org/subjects/A35A67AA-2832-415E-F7BE-3B051444E665'},
'97': {'uri': 'https://opencontext.org/subjects/9D0D7D58-A751-4CC7-EF3D-5318D9D16E47'},
'98': {'uri': 'https://opencontext.org/subjects/20BEF152-BA8B-4A08-57D7-BAADD30A7248'},
'99': {'uri': 'https://opencontext.org/subjects/00F368CE-AFF2-43CA-489A-84A0EC2DFF8C'},
}
id_prop = 'PolygonID'
gimp.save_partial_clean_file(pc_json_obj,
'pc-geo', 'pc_trenches_2017_4326.geojson',
id_prop, ok_ids=False, add_props=pc_props, combine_json_obj=None)
gimp.load_into_importer = False
gimp.process_features_in_file('pc-geo', 'labeled-pc-trenches-2017-4326.geojson')
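# Manually correct the point coordinates stored for a single subject record.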
from opencontext_py.apps.ocitems.geospace.models import Geospace
uuid = '59CA9A4E-3D63-4596-0F53-383F286E59FF'
g = Geospace.objects.get(uuid=uuid)
g.latitude = 43.1524182334655
g.longitude = 11.401899321827992
g.coordinates = '[11.401899321827992,43.1524182334655]'
g.save()
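# Import the PC artifact features from GeoJSON.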
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.process_features_in_file('pc-geo', 'pc_artifacts_2017_4326.geojson')
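# Import the vesco artifact features from GeoJSON.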
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.process_features_in_file('pc-geo', 'vesco_artifacts_2017_4326.geojson')
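# Rebuild the PC trenches GeoJSON, combining the trench polygons with the artifact points file, then
# write id-cleaned and no-coordinate copies (add_props is assumed to carry over from the earlier pc_props run).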
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.source_id = 'pc_trenches_2017_4326'
id_prop = 'PolygonID'
ok_ids = False
json_obj = gimp.load_json_file('pc-geo', 'pc_trenches_2017_4326.geojson')
points = gimp.load_json_file('pc-geo', 'pc_artifacts_2017_4326.geojson')
gimp.save_partial_clean_file(json_obj,
'pc-geo', 'pc_trenches_2017_4326.geojson',
id_prop, ok_ids, add_props, points)
json_obj = gimp.load_json_file('pc-geo', 'id-clean-coord-pc_trenches_2017_4326.geojson')
gimp.save_no_coord_file(json_obj, 'pc-geo', 'id-clean-coord-pc_trenches_2017_4326.geojson')
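# Replace the Giza botany-area polygons: normalize area names, match each to its manifest record,
# add the subject URI, and import the revised GeoJSON.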
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
from opencontext_py.apps.ocitems.geospace.models import Geospace
print('Delete old botany-areas geospatial data')
Geospace.objects\
.filter(source_id='botany-areas',
project_uuid='10aa84ad-c5de-4e79-89ce-d83b75ed72b5',
ftype__in=['Polygon', 'Multipolygon']).delete()
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = '10aa84ad-c5de-4e79-89ce-d83b75ed72b5'
gimp.source_id = 'botany-areas'
id_prop = 'LocalArea'
ok_ids = False
projects = ['10aa84ad-c5de-4e79-89ce-d83b75ed72b5', '5A6DDB94-70BE-43B4-2D5D-35D983B21515']
json_obj = gimp.load_json_file('giza-areas', 'botany-areas-revised.geojson')
rev_json = LastUpdatedOrderedDict()
rev_json['features'] = []
for feat in json_obj['features']:
area_name = feat['properties']['LocalArea']
if area_name == 'KKT-Nohas House':
area_name = "Noha's"
elif area_name == 'G1':
area_name = 'GI'
man_objs = Manifest.objects.filter(label=area_name, project_uuid__in=projects, class_uri='oc-gen:cat-area')[:1]
if len(man_objs):
feat['properties']['uri'] = 'http://opencontext.org/subjects/' + man_objs[0].uuid
rev_json['features'].append(feat)
else:
print('Cannot find: ' + area_name)
gimp.save_json_file(rev_json, 'giza-areas', 'botany-areas-revised-w-uris.geojson')
gimp.process_features_in_file('giza-areas', 'botany-areas-revised-w-uris.geojson')
gimp.save_no_coord_file(rev_json, 'giza-areas', 'id-clean-coord-botany-areas-revised-w-uris.geojson')
import json
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.geospace.models import Geospace, GeospaceGeneration
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
from opencontext_py.libs.validategeojson import ValidateGeoJson
from opencontext_py.libs.globalmaptiles import GlobalMercator
from opencontext_py.libs.reprojection import ReprojectUtilities
import pyproj
from pyproj import Proj, transform
import numpy
import geojson
# TRAP Bulgaria
project_uuid = '24e2aa20-59e6-4d66-948b-50ee245a7cfc'
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = project_uuid
json_obj = gimp.load_json_file('trap-geo', 'yam-survey-units.geojson')
new_geojson = LastUpdatedOrderedDict()
for key, vals in json_obj.items():
if key != 'features':
new_geojson[key] = vals
else:
new_geojson[key] = []
features = []
bad_features = []
reproj = ReprojectUtilities()
reproj.set_in_out_crs('EPSG:32635', 'EPSG:4326')
for feature in json_obj['features']:
id = str(feature['properties']['SUID'])
label = 'Survey Unit ' + id
print('Find: {}'.format(label))
try:
m_obj = Manifest.objects.get(project_uuid=project_uuid, label=label, item_type='subjects')
uuid = m_obj.uuid
except:
uuid = ''
print('--> {}'.format(uuid))
feature['properties']['uuid'] = uuid
if not isinstance(feature['geometry'], dict):
print(' ---- BAD FEATURE: {}'.format(label))
bad_features.append(feature)
continue
geometry_type = feature['geometry']['type']
coordinates = feature['geometry']['coordinates']
new_coordinates = reproj.reproject_multi_or_polygon(coordinates, geometry_type)
feature['geometry']['coordinates'] = new_coordinates
coord_str = json.dumps(new_coordinates,
indent=4,
ensure_ascii=False)
gg = GeospaceGeneration()
lon_lat = gg.get_centroid_lonlat_coordinates(coord_str,
feature['geometry']['type'])
longitude = float(lon_lat[0])
latitude = float(lon_lat[1])
feature['properties']['longitude'] = longitude
feature['properties']['latitude'] = latitude
gm = GlobalMercator()
feature['properties']['geo-tile'] = gm.lat_lon_to_quadtree(latitude, longitude, 20)
features.append(feature)
new_geojson['features'] = features
new_geojson['bad-features'] = bad_features
gimp.save_json_file(new_geojson, 'trap-geo', 'yam-survey-units-reproj-w-uuids.geojson')
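# --- illustrative aside, not part of the original notes ---
# ReprojectUtilities.reproject_multi_or_polygon presumably wraps a coordinate transform
# like the following minimal sketch (assumes pyproj >= 2.x with the Transformer API;
# the example ring below is made-up data, not project data).
from pyproj import Transformer

def reproject_ring_sketch(ring, in_crs='EPSG:32635', out_crs='EPSG:4326'):
    # reproject one polygon ring of (x, y) pairs into (lon, lat) pairs
    transformer = Transformer.from_crs(in_crs, out_crs, always_xy=True)
    return [list(transformer.transform(x, y)) for x, y in ring]

# example_ring = [[355000.0, 4690000.0], [355100.0, 4690000.0], [355100.0, 4690100.0], [355000.0, 4690000.0]]
# reproject_ring_sketch(example_ring)  # -> [[lon, lat], ...]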
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
media_uuid = '6d42ad2a-cbc2-46e2-a72c-907607b6fe3c'
project_uuid = '10aa84ad-c5de-4e79-89ce-d83b75ed72b5'
Assertion.objects\
.filter(uuid=project_uuid,
predicate_uuid=Assertion.PREDICATES_GEO_OVERLAY)\
.delete()
media_man = Manifest.objects.get(uuid=media_uuid)
if not isinstance(media_man.sup_json, dict):
meta = LastUpdatedOrderedDict()
else:
meta = media_man.sup_json
meta['Leaflet'] = LastUpdatedOrderedDict()
# Leaflet image-overlay bounds expect [[lat, lon], [lat, lon]] corner pairs;
# only the corrected ordering is kept here
meta['Leaflet']['bounds'] = [[29.972094, 31.138088], [29.973761, 31.135083]]
meta['Leaflet']['label'] = 'Menkaure Valley Temple East Plan'
media_man.sup_json = meta
media_man.save()
Assertion.objects\
.filter(uuid=project_uuid,
predicate_uuid=Assertion.PREDICATES_GEO_OVERLAY)\
.delete()
ass = Assertion()
ass.uuid = '5A6DDB94-70BE-43B4-2D5D-35D983B21515'
ass.subject_type = 'projects'
ass.project_uuid = '5A6DDB94-70BE-43B4-2D5D-35D983B21515'
ass.source_id = 'test-geo-overlay'
ass.obs_node = '#obs-' + str(1)
ass.obs_num = 1
ass.sort = 1
ass.visibility = 1
ass.predicate_uuid = Assertion.PREDICATES_GEO_OVERLAY
ass.object_uuid = media_man.uuid
ass.object_type = media_man.item_type
ass.save()
ass = Assertion()
ass.uuid = project_uuid
ass.subject_type = 'projects'
ass.project_uuid = project_uuid
ass.source_id = 'test-geo-overlay'
ass.obs_node = '#obs-' + str(1)
ass.obs_num = 1
ass.sort = 1
ass.visibility = 1
ass.predicate_uuid = Assertion.PREDICATES_GEO_OVERLAY
ass.object_uuid = 'da676164-9829-4798-bb5d-c5b1135daa27'
ass.object_type = 'media'
ass.save()
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.source_id = 'pc_trenches_2017_4326'
gimp.process_features_in_file('pc-geo', 'pc_trenches_2017_4326.geojson')
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.archive.binaries import ArchiveBinaries
arch_bin = ArchiveBinaries()
project_uuid = 'b6de18c6-bba8-4b53-9d9e-3eea4b794268'
arch_bin.save_project_binaries(project_uuid)
from opencontext_py.apps.archive.binaries import ArchiveBinaries
project_uuids = [
'b6de18c6-bba8-4b53-9d9e-3eea4b794268'
]
for project_uuid in project_uuids:
arch_bin = ArchiveBinaries()
arch_bin.save_project_binaries(project_uuid)
arch_bin.archive_all_project_binaries(project_uuid)
from opencontext_py.apps.archive.binaries import ArchiveBinaries
project_uuids = [
"DF043419-F23B-41DA-7E4D-EE52AF22F92F"
]
for project_uuid in project_uuids:
arch_bin = ArchiveBinaries()
arch_bin.temp_cache_dir = 'temp-cache'
arch_bin.max_repo_file_count = 2500
arch_bin.save_project_binaries(project_uuid)
arch_bin.archive_all_project_binaries(project_uuid)
from opencontext_py.apps.archive.binaries import ArchiveBinaries
project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
archive_dir = 'files-1-by---DF043419-F23B-41DA-7E4D-EE52AF22F92F'
deposition_id = 1251106
arch_bin.archive_dir_project_binaries(project_uuid, archive_dir, deposition_id)
from opencontext_py.apps.archive.binaries import ArchiveBinaries
project_uuid = "3F6DCD13-A476-488E-ED10-47D25513FCB2"
archive_dir = "files-4-by---3F6DCD13-A476-488E-ED10-47D25513FCB2"
deposition_id = 1242673
arch_bin = ArchiveBinaries()
arch_bin.archive_dir_project_binaries(project_uuid, archive_dir, deposition_id)
dirs = [
"files-5-by---3F6DCD13-A476-488E-ED10-47D25513FCB2",
"files-6-by---3F6DCD13-A476-488E-ED10-47D25513FCB2"
]
for archive_dir in dirs:
project_uuid = "3F6DCD13-A476-488E-ED10-47D25513FCB2"
arch_bin = ArchiveBinaries()
arch_bin.archive_dir_project_binaries(project_uuid, archive_dir)
from opencontext_py.apps.archive.binaries import ArchiveBinaries
project_uuid = "141e814a-ba2d-4560-879f-80f1afb019e9"
archive_dir = "files-4-by---141e814a-ba2d-4560-879f-80f1afb019e9"
deposition_id = 1439449
arch_bin = ArchiveBinaries()
arch_bin.archive_dir_project_binaries(project_uuid, archive_dir, deposition_id)
from opencontext_py.apps.archive.binaries import ArchiveBinaries
dirs = [
"files-5-by---141e814a-ba2d-4560-879f-80f1afb019e9",
"files-6-by---141e814a-ba2d-4560-879f-80f1afb019e9",
]
for archive_dir in dirs:
project_uuid = "141e814a-ba2d-4560-879f-80f1afb019e9"
arch_bin = ArchiveBinaries()
arch_bin.archive_dir_project_binaries(project_uuid, archive_dir)
import shutil
import os
from django.conf import settings
path = settings.STATIC_EXPORTS_ROOT + 'aap-3d/obj-models'
zip_path = settings.STATIC_EXPORTS_ROOT + 'aap-3d/obj-models-zip'
for root, dirs, files in os.walk(path):
for adir in dirs:
zip_dir = os.path.join(path, adir)
zip_file = os.path.join(zip_path, adir)
print(zip_dir + ' to ' + zip_file)
shutil.make_archive(zip_file, 'zip', zip_dir)
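# --- illustrative aside, not part of the original notes ---
# quick sanity check of the archives written above, using the standard-library zipfile module
import zipfile
for root, dirs, files in os.walk(zip_path):
    for zname in files:
        if not zname.endswith('.zip'):
            continue
        with zipfile.ZipFile(os.path.join(root, zname)) as zf:
            print(zname, 'contains', len(zf.namelist()), 'members')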
import pandas as pd
import shutil
import os
import numpy as np
from django.conf import settings
renames = {
'FORMDATE': 'FORM_DATE',
'TRINOMIAL': 'SITE_NUM',
'SITENUM': 'SITE_NUM',
'TYPE_SITE': 'SITE_TYPE',
'TYPESITE': 'SITE_TYPE',
'TYPE_STE': 'SITE_TYPE',
'SIZESITE': 'SITE_SIZE',
'SITESIZE': 'SITE_SIZE',
'SITENAME': 'SITE_NAME',
'Atlas_Number': 'ATLAS_NUMBER',
'MAT_COL': 'MATERIAL_COLLECTED',
'MATERIALS': 'MATERIAL_COLLECTED',
'ARTIFACTS': 'ARTIFACTS',
'CULT_DESC': 'TIME_CULTURE_DESC',
'TIME_DESC': 'TIME_CULTURE_DESC',
'TIME_OCC': 'TIME_PERIOD',
'TIME_PER': 'TIME_PERIOD',
'SING_COM': 'COMPONENT_SINGLE',
'SINGLE': 'COMPONENT_SINGLE',
'MULT_COM': 'COMPONENT_MULTI',
'MULTIPLE': 'COMPONENT_MULTI',
'COMP_DESC': 'COMPONENT_DESC',
'BASIS': 'COMPONENT_DESC',
'COUNTY': 'COUNTY'
}
path = settings.STATIC_EXPORTS_ROOT + 'texas'
dfs = []
all_cols = []
excel_files = []
for root, dirs, files in os.walk(path):
for act_file in files:
if act_file.endswith('.xls'):
file_num = ''.join(c for c in act_file if c.isdigit())
excel_files.append((int(file_num), act_file))
dir_file = os.path.join(path, act_file)
df = pd.read_excel(dir_file, index_col=None, na_values=['NA'])
df['filename'] = act_file
df = df.applymap(lambda x: x.encode('unicode_escape').decode('utf-8') if isinstance(x, str) else x)
col_names = df.columns.values.tolist()
print('-'*40)
print(act_file)
print(str(col_names))
"""
for bad_col, good_col in renames.items():
if bad_col in col_names:
df.rename(columns={bad_col: good_col}, inplace=True)
"""
new_cols = df.columns.values.tolist()
all_cols = list(set(all_cols + new_cols))
all_cols.sort()
print('Total of {} columns for all dataframes'.format(len(all_cols)))
dfs.append(df)
excel_files = sorted(excel_files)
print('\n'.join([f[1] for f in excel_files]))
all_df = pd.concat(dfs)
csv_all_dir_file = os.path.join(path, 'all-texas.csv')
print('Save the CSV: ' + csv_all_dir_file)
# write each output once; the original open-ended 'while True' loops would keep
# re-appending the same rows and never terminate
all_df.to_csv(csv_all_dir_file)
xls_all_dir_file = os.path.join(path, 'all-texas.xlsx')
print('Save the Excel: ' + xls_all_dir_file)
with pd.ExcelWriter(xls_all_dir_file) as writer:
    all_df.to_excel(writer, sheet_name='Sheet1')
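# --- illustrative aside, not part of the original notes ---
# the column-rename loop above is commented out; applying the `renames` mapping before
# concatenation would look roughly like this (rename() silently ignores columns that a
# given sheet does not have, so one mapping covers all of the Excel files)
harmonized_dfs = [df.rename(columns=renames) for df in dfs]
harmonized_df = pd.concat(harmonized_dfs, ignore_index=True)
# harmonized_df.to_csv(os.path.join(path, 'all-texas-harmonized.csv'), index=False)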
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.subjects.models import Subject
from opencontext_py.apps.ocitems.geospace.models import Geospace
from opencontext_py.apps.ocitems.subjects.generation import SubjectGeneration
from opencontext_py.apps.edit.items.deletemerge import DeleteMerge
from opencontext_py.libs.solrconnection import SolrConnection
project_uuid = '10aa84ad-c5de-4e79-89ce-d83b75ed72b5'
area_proj_uuid = '5A6DDB94-70BE-43B4-2D5D-35D983B21515'
source_id = 'ref:2289489501377'
area_field = 9
feature_field = 10
specimen_field = 1
man_fixes = Manifest.objects.filter(item_type='subjects', class_uri='oc-gen:cat-plant-remains', project_uuid=project_uuid).order_by('sort')
changed_uuids = []
p_subs = {}
for man_obj in man_fixes:
cont_asses = Assertion.objects.filter(predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid=man_obj.uuid)[:1]
if len(cont_asses):
continue
# need to fix missing context association
spec_id = man_obj.label.replace('Specimen ', '')
spec_cell = ImportCell.objects.get(source_id=source_id, record=spec_id, field_num=specimen_field)
area_cell = ImportCell.objects.get(source_id=source_id, field_num=area_field, row_num=spec_cell.row_num)
feat_cell = ImportCell.objects.get(source_id=source_id, field_num=feature_field, row_num=spec_cell.row_num)
l_context = '/{}/Feat. {}'.format(area_cell.record.replace('/', '--'), feat_cell.record)
if feat_cell.record in ['1031', '1089', '1188'] and 'SSGH' in area_cell.record:
l_context = '/SSGH (Khentkawes)/Feat. {}'.format(feat_cell.record)
if l_context == '/KKT-E+/Feat. 33821':
l_context = '/KKT-E/Feat. 33821'
if l_context == '/KKT-E+/Feat. 33831':
l_context = '/KKT-E/Feat. 33831'
print('Find Context: {} for {} import row: {}'.format(l_context, man_obj.label, spec_cell.row_num))
if l_context not in p_subs:
parent_sub = Subject.objects.get(context__endswith=l_context, project_uuid__in=[project_uuid, area_proj_uuid])
p_subs[l_context] = parent_sub
else:
parent_sub = p_subs[l_context]
new_ass = Assertion()
new_ass.uuid = parent_sub.uuid
new_ass.subject_type = 'subjects'
new_ass.project_uuid = man_obj.project_uuid
new_ass.source_id = 'ref:1967003269393-fix'
new_ass.obs_node = '#contents-' + str(1)
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = Assertion.PREDICATES_CONTAINS
new_ass.object_type = man_obj.item_type
new_ass.object_uuid = man_obj.uuid
new_ass.save()
sg = SubjectGeneration()
sg.generate_save_context_path_from_uuid(man_obj.uuid)
from opencontext_py.apps.ocitems.assertions.models import Assertion
Assertion.objects.filter(predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid='2176cb88-bcb4-4ad9-b4aa-e9009b8c4a66').exclude(uuid='FEC673D2-C1F0-4B62-BF66-29127AE2AE11').delete()
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.subjects.generation import SubjectGeneration
from opencontext_py.apps.ocitems.subjects.models import Subject
from opencontext_py.apps.ocitems.manifest.models import Manifest
from django.core.cache import caches
cache = caches['redis']
cache.clear()
cache = caches['default']
cache.clear()
cache = caches['memory']
cache.clear()
bad_subs = Subject.objects.filter(context__contains='/Egypt/')
bad_uuids = [bs.uuid for bs in bad_subs]
bad_man_objs = Manifest.objects.filter(uuid__in=bad_uuids, class_uri__in=['oc-gen:cat-feature'])
bad_feats = [bm.uuid for bm in bad_man_objs]
f_subs = Subject.objects.filter(uuid__in=bad_feats)
for bad_sub in bad_subs:
sg = SubjectGeneration()
sg.generate_save_context_path_from_uuid(bad_sub.uuid)
from opencontext_py.apps.ocitems.assertions.models import Assertion
keep_proj = '5A6DDB94-70BE-43B4-2D5D-35D983B21515'
keep_p = 'bd0a8c74-c3fe-47bb-bb1a-be067e101069'
keep_p_asses = Assertion.objects.filter(uuid=keep_p, predicate_uuid=Assertion.PREDICATES_CONTAINS)
for keep_p_ch in keep_p_asses:
ch_uuid = keep_p_ch.object_uuid
bad_asses = Assertion.objects.filter(predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid=ch_uuid).exclude(uuid=keep_p)
if len(bad_asses):
print('Remove erroneous parents for :' + ch_uuid)
bad_asses.delete()
good_asses = Assertion.objects.filter(uuid=keep_p, predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid=ch_uuid)
if len(good_asses) > 1:
print('More than 1 parent for :' + ch_uuid)
redund_ass = Assertion.objects.filter(uuid=keep_p, predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid=ch_uuid).exclude(project_uuid=keep_proj)
if len(redund_ass) < len(good_asses):
print('Delete redundant for ' + ch_uuid)
redund_ass.delete()
    # note: 'mvt' is a parent uuid (presumably the Menkaure Valley Temple item) defined earlier in this working session, not in the excerpt above
    bad_asses = Assertion.objects.filter(predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid=ch_uuid).exclude(uuid=mvt)
if len(bad_asses):
print('delete wrong for: ' + ch_uuid )
bad_asses.delete()
m_asses = Assertion.objects.filter(predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid=ch_uuid).exclude(uuid=mvt)
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.subjects.models import Subject
from opencontext_py.apps.ocitems.geospace.models import Geospace
from opencontext_py.apps.ocitems.subjects.generation import SubjectGeneration
from opencontext_py.apps.edit.items.deletemerge import DeleteMerge
from opencontext_py.libs.solrconnection import SolrConnection
project_uuid = '10aa84ad-c5de-4e79-89ce-d83b75ed72b5'
area_proj_uuid = '5A6DDB94-70BE-43B4-2D5D-35D983B21515'
source_id = 'ref:1967003269393'
area_field = 20
feature_field = 22
specimen_field = 1
man_fixes = Manifest.objects.filter(item_type='subjects', class_uri='oc-gen:cat-feature', project_uuid=project_uuid).order_by('sort')
changed_uuids = []
p_subs = {}
for man_obj in man_fixes:
cont_asses = Assertion.objects.filter(predicate_uuid=Assertion.PREDICATES_CONTAINS, object_uuid=man_obj.uuid)[:1]
if len(cont_asses):
continue
# need to fix missing context association
act_id = man_obj.label.replace('Feat. ', '')
feat_cell = ImportCell.objects.filter(source_id=source_id, record=act_id, field_num=feature_field)[:1][0]
area_cell = ImportCell.objects.get(source_id=source_id, field_num=area_field, row_num=feat_cell.row_num)
l_context = area_cell.record.replace('/', '--')
l_context = '/' + l_context
if act_id in ['1031', '1089', '1188'] and 'SSGH' in l_context:
l_context = '/SSGH (Khentkawes)'
print('Find Context: {} for {} import row: {}'.format(l_context, man_obj.label, feat_cell.row_num))
if l_context not in p_subs:
parent_sub = Subject.objects.get(context__endswith=l_context, project_uuid__in=[project_uuid, area_proj_uuid])
p_subs[l_context] = parent_sub
else:
parent_sub = p_subs[l_context]
print('Adding Context: {} : {}'.format(parent_sub.uuid, parent_sub.context))
new_ass = Assertion()
new_ass.uuid = parent_sub.uuid
new_ass.subject_type = 'subjects'
new_ass.project_uuid = man_obj.project_uuid
new_ass.source_id = source_id + '-fix'
new_ass.obs_node = '#contents-' + str(1)
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = Assertion.PREDICATES_CONTAINS
new_ass.object_type = man_obj.item_type
new_ass.object_uuid = man_obj.uuid
new_ass.save()
sg = SubjectGeneration()
sg.generate_save_context_path_from_uuid(man_obj.uuid)
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
parent_uuid = '64a12f7b-5ed3-4b1e-beb0-186d5f6c8549'
project_uuid = '10aa84ad-c5de-4e79-89ce-d83b75ed72b5'
area_proj_uuid = '5A6DDB94-70BE-43B4-2D5D-35D983B21515'
child_uuids = []
for child in Assertion.objects.filter(predicate_uuid=Assertion.PREDICATES_CONTAINS, uuid=parent_uuid):
child_uuids.append(child.object_uuid)
keeps_mans = Manifest.objects.filter(uuid__in=child_uuids, project_uuid=area_proj_uuid)
for keep_man in keeps_mans:
rem_men = Manifest.objects.filter(label=keep_man.label, uuid__in=child_uuids, project_uuid=project_uuid)[:1]
if len(rem_men):
delete_uuid = rem_men[0].uuid
merge_into_uuid = keep_man.uuid
print('Remove {} to keep {} with label {}'.format(delete_uuid, merge_into_uuid, keep_man.label))
dm = DeleteMerge()
dm.merge_by_uuid(delete_uuid, merge_into_uuid)
from opencontext_py.apps.edit.items.deletemerge import DeleteMerge
delete_uuid = '12b6512b-22bc-4eb7-b23d-868aff7b380a'
merge_into_uuid = '9a567a71-1cc7-4e51-8e8f-79e0a46e0f40'
dm = DeleteMerge()
dm.merge_by_uuid(delete_uuid, merge_into_uuid)
import json
import random
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.libs.validategeojson import ValidateGeoJson
from opencontext_py.libs.clustergeojson import ClusterGeoJson
from opencontext_py.libs.reprojection import ReprojectUtilities
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
from opencontext_py.apps.ocitems.geospace.models import Geospace, GeospaceGeneration
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = 'DF043419-F23B-41DA-7E4D-EE52AF22F92F'
gimp.source_id = 'vesco_trenches_2017_4326'
geoclust = ClusterGeoJson()
rpu = ReprojectUtilities()
rpu.set_in_out_crs('EPSG:32636', 'EPSG:4326')
geojsons = {}
for file in ['observation_points', 'avkat_dbo_features', 'features_intensive_survey', 'suvey_units']:
json_obj = gimp.load_json_file('avkat-geo', (file + '.json'))
geojson = LastUpdatedOrderedDict()
geojson['type'] = 'FeatureCollection'
geojson['features'] = []
samp_geojson = LastUpdatedOrderedDict()
samp_geojson['type'] = 'FeatureCollection'
samp_geojson['features'] = []
i = 0
for old_f in json_obj['features']:
# import pdb; pdb.set_trace()
i += 1
new_f = LastUpdatedOrderedDict()
new_f['type'] = 'Feature'
if 'attributes' in old_f:
new_f['properties'] = old_f['attributes']
elif 'properties' in old_f:
new_f['properties'] = old_f['properties']
new_f['geometry'] = LastUpdatedOrderedDict()
if 'rings' in old_f['geometry']:
new_f['geometry']['type'] = 'Polygon'
new_f['geometry']['coordinates'] = old_f['geometry']['rings']
geometry_type = new_f['geometry']['type']
coordinates = new_f['geometry']['coordinates']
v_geojson = ValidateGeoJson()
c_ok = v_geojson.validate_all_geometry_coordinates(geometry_type,
coordinates)
if not c_ok:
print('Fixing coordinates for: {}'.format(i))
coordinates = v_geojson.fix_geometry_rings_dir(geometry_type, coordinates)
new_f['geometry']['coordinates'] = coordinates
coord_str = json.dumps(coordinates, indent=4, ensure_ascii=False)
gg = GeospaceGeneration()
lon_lat = gg.get_centroid_lonlat_coordinates(coord_str, geometry_type)
new_f['properties']['latitude'] = lon_lat[1]
new_f['properties']['longitude'] = lon_lat[0]
else:
if 'x' in old_f['geometry'] and 'y' in old_f['geometry']:
coords = rpu.reproject_coordinate_pair([ float(old_f['geometry']['x']), float(old_f['geometry']['y'])])
if ('type' in old_f['geometry'] and
old_f['geometry']['type'] == 'Point' and
'coordinates' in old_f['geometry']):
coords = old_f['geometry']['coordinates']
if coords is None:
import pdb; pdb.set_trace()
new_f['geometry']['type'] = 'Point'
new_f['geometry']['coordinates'] = coords
if 'x' in old_f['geometry'] and 'y' in old_f['geometry']:
new_f['properties']['utm-x'] = old_f['geometry']['x']
new_f['properties']['utm-y'] = old_f['geometry']['y']
new_f['properties']['lat'] = coords[1]
new_f['properties']['lon'] = coords[0]
geojson['features'].append(new_f)
r = random.randint(1,11)
if r > 9:
samp_geojson['features'].append(new_f)
geojson = geoclust.extact_lon_lat_data_from_geojson(geojson)
gimp.save_json_file(geojson, 'avkat-geo', (file + '-new.geojson'))
gimp.save_json_file(samp_geojson, 'avkat-geo', (file + '-new-sampled.geojson'))
geojsons[file] = geojson
geoclust.cluster_lon_lats()
for file, geojson in geojsons.items():
geojson = geoclust.add_cluster_property_to_geojson(geojson)
gimp.save_json_file(geojson, 'avkat-geo', (file + '-new-clustered.geojson'))
all_geojson = geoclust.make_clusters_geojson()
gimp.save_json_file(all_geojson, 'avkat-geo', 'all-clustered-new.geojson')
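# --- illustrative aside, not part of the original notes ---
# GeospaceGeneration.get_centroid_lonlat_coordinates is assumed to compute a centroid from
# the GeoJSON geometry; with shapely installed (an assumption, it is not imported above),
# an equivalent spot-check for a single feature could look like this
from shapely.geometry import shape

def centroid_lonlat_sketch(geometry):
    # return (lon, lat) for a GeoJSON geometry dict
    c = shape(geometry).centroid
    return (c.x, c.y)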
import json
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.libs.validategeojson import ValidateGeoJson
from opencontext_py.libs.clustergeojson import ClusterGeoJson
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
from opencontext_py.apps.ocitems.geospace.models import Geospace, GeospaceGeneration
from opencontext_py.apps.imports.fieldannotations.models import ImportFieldAnnotation
gimp = GeoJSONimport()
gimp.load_into_importer = False
project_uuid = '02b55e8c-e9b1-49e5-8edf-0afeea10e2be'
configs = [
# ('suvey_units', '', 'SU', 'oc-gen:cat-survey-unit'),
# ('all', 'SU Group ', 'lon-lat-cluster', 'oc-gen:cat-region'),
('features_intensive_survey', '', 'f_no', 'oc-gen:cat-feature'),
]
for file, label_prefix, label_prop, class_uri in configs:
gimp.source_id = file
geojson = gimp.load_json_file('avkat-geo', (file + '-clustered.geojson'))
for feat in geojson['features']:
label = label_prefix + str(feat['properties'][label_prop])
man_obj = Manifest.objects.get(label=label, project_uuid=project_uuid, class_uri=class_uri)
props = LastUpdatedOrderedDict()
props['uri'] = 'https://opencontext.org/subjects/' + man_obj.uuid
old_props = feat['properties']
for key, val in old_props.items():
props[key] = val
feat['properties'] = props
geometry_type = feat['geometry']['type']
coordinates = feat['geometry']['coordinates']
coord_str = json.dumps(coordinates, indent=4, ensure_ascii=False)
gg = GeospaceGeneration()
lon_lat = gg.get_centroid_lonlat_coordinates(coord_str, geometry_type)
Geospace.objects.filter(uuid=man_obj.uuid).delete()
geo = Geospace()
geo.uuid = man_obj.uuid
geo.project_uuid = man_obj.project_uuid
geo.source_id = file
geo.item_type = man_obj.item_type
geo.feature_id = 1
geo.meta_type = ImportFieldAnnotation.PRED_GEO_LOCATION
geo.ftype = geometry_type
geo.latitude = lon_lat[1]
geo.longitude = lon_lat[0]
geo.specificity = 0
# dump coordinates as json string
geo.coordinates = coord_str
try:
geo.save()
except:
print('Problem saving: ' + str(man_obj.uuid))
quit()
gimp.save_json_file(geojson, 'avkat-geo', (file + '-clustered-uris.geojson'))
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.predicates.models import Predicate
project_uuid = '02b55e8c-e9b1-49e5-8edf-0afeea10e2be'
vars = [
'GIS Feature ID',
'Survey Unit ID',
'Transect Type',
'Survey Bearing',
'Survey Unit Width',
'Linear Meters Walked',
'Shape Length',
'Shape Area',
'Weather',
'Visibility',
'Ceramics',
'Land Use',
'AgType Cereal',
'AgType Plow',
'AgType Fruit',
'AgType Forest',
'AgType Olive',
'AgType Vegetable',
'AgType Vines Grapes',
'AgType Bee Keeping',
'AgType Other',
'AgType Other Description',
'Description',
]
sort = 9
for vvar in vars:
sort += 1
print('Find: ' + vvar)
vman = Manifest.objects.get(label=vvar, project_uuid=project_uuid, item_type='predicates')
vpred = Predicate.objects.get(uuid=vman.uuid)
vpred.sort = sort
vpred.save()
Assertion.objects.filter(predicate_uuid=vman.uuid).update(sort=sort)
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.predicates.models import Predicate
project_uuid = '02b55e8c-e9b1-49e5-8edf-0afeea10e2be'
vars = [
"Team member walking the 'A' line",
"Team member walking the 'B' line",
"Team member walking the 'C' line",
"Team member walking the 'D' line",
"Team member walking the 'E' line",
"Team member walking the 'F' line",
"Team member walking the 'G' line",
"Team member walking the 'H' line",
"Team Leader",
"Paper Form Completed by",
]
sort = 99
for vvar in vars:
sort += 1
print('Find: ' + vvar)
vman = Manifest.objects.get(label=vvar, project_uuid=project_uuid, item_type='predicates')
vpred = Predicate.objects.get(uuid=vman.uuid)
vpred.sort = sort
vpred.save()
Assertion.objects.filter(predicate_uuid=vman.uuid).update(sort=sort)
import os
from django.conf import settings
from opencontext_py.libs.binaryfiles import BinaryFiles
from opencontext_py.apps.ocitems.mediafiles.models import Mediafile
path = settings.STATIC_EXPORTS_ROOT + 'iiif'
project_uuid = '141e814a-ba2d-4560-879f-80f1afb019e9'
min_size = 104394357.0
bf = BinaryFiles()
meds = Mediafile.objects.filter(project_uuid=project_uuid, filesize__gte=min_size)\
.exclude(mime_type_uri__contains='application/pdf')\
.order_by('-filesize')[:100]
for med in meds:
file_name = med.file_uri.split('/')[-1]
print('Save ' + file_name)
bf.get_cache_remote_file_content_http(file_name, med.file_uri, 'iiif')
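# --- illustrative aside, not part of the original notes ---
# BinaryFiles.get_cache_remote_file_content_http presumably streams each remote file over
# HTTP into a local cache directory, roughly like this sketch (the cache path here is an
# assumption used only for illustration; `requests` is not imported in the original notes)
import requests

def cache_remote_file_sketch(file_name, file_uri, cache_dir='/tmp/oc-cache/iiif'):
    os.makedirs(cache_dir, exist_ok=True)
    local_path = os.path.join(cache_dir, file_name)
    with requests.get(file_uri, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        with open(local_path, 'wb') as out:
            for chunk in resp.iter_content(chunk_size=1024 * 1024):
                out.write(chunk)
    return local_path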
from opencontext_py.apps.ocitems.mediafiles.models import Mediafile
project_uuid = '141e814a-ba2d-4560-879f-80f1afb019e9'
min_size = 104394357.0
imgs = {}
imgs['101-drawing-d-ss-016.tif'] = 'https://free.iiifhosting.com/iiif/291e81f8bc2847aaa5f4c532b4f59e1751aa76ce2e7a7ce8acd459ec0f9b2f30/info.json'
imgs['101-drawing-d-gen-027.tif'] = 'https://free.iiifhosting.com/iiif/a696615ab137c4de2a6c7212651df9467cd04505b21dcbc2602c43eaa2ecaf7a/info.json'
imgs['101-drawing-d-ss-002.tif'] = 'https://free.iiifhosting.com/iiif/69317657a4540d28ce549cb082fed05e821b2a205ba3f69a51539772e94866f5/info.json'
imgs['101-drawing-d-e-047.tif'] = 'https://free.iiifhosting.com/iiif/42e0b97f7b0e46a83828e521c04805e771a8e1dfe24fbada611de9b0726313c3/info.json'
imgs['101-drawing-d-ss-001.tif'] = 'https://free.iiifhosting.com/iiif/840728372ee6b611c3baf631f109b79b3e5657f38e71ff2499b34532f62745fa/info.json'
imgs['101-drawing-d-kvt-006.tif'] = 'https://free.iiifhosting.com/iiif/641ba83302bdb3c1b6d5e9a58a1ce948e6ba0da375ebbc5231df6a4453c5c748/info.json'
imgs['101-drawing-d-gen-007.tif'] = 'https://free.iiifhosting.com/iiif/479f5a37dd2f33d959cf72528ce3978f6ff70788625ab71e028bc1eb360494ad/info.json'
imgs['101-drawing-d-ss-015.tif'] = 'https://free.iiifhosting.com/iiif/9a7393c9278fe60e4ab23d4a2bfd0d7192ab048d132795f337a6b79e89c2f24/info.json'
imgs['101-drawing-d-gen-005.tif'] = 'https://free.iiifhosting.com/iiif/2b85999ad86fa3200a91121912b28e9bae96d55dd554d1d45bd2ac7de003532d/info.json'
imgs['101-drawing-d-ss-004.tif'] = 'https://free.iiifhosting.com/iiif/390df4778e208fd9035c822d161a718414dc56d38b02f6e1dc9c1617d9744cb7/info.json'
imgs['101-drawing-d-ss-005.tif'] = 'https://free.iiifhosting.com/iiif/2851bd2a55ed85cfd1775f9b4b9689b776c1e134e488230e4871736f05972127/info.json'
imgs['101-drawing-d-ss-021.tif'] = 'https://free.iiifhosting.com/iiif/7fd8f19d033a10db04a9960042911223d69468b6df9dfeee1f2c0221d3e29f58/info.json'
imgs['101-drawing-d-ss-003.tif'] = 'https://free.iiifhosting.com/iiif/22443f7c36e4a60e6fb1c8eafdecedd44d55edb16d1ff5da3f7d960f46e9c9ad/info.json'
imgs['101-drawing-d-ss-012.tiff'] = 'https://free.iiifhosting.com/iiif/936ba50885c56f808dd4fc4056f6dd0ae993c084379e2975c5091e6f06e5d9ce/info.json'
meds = Mediafile.objects.filter(project_uuid=project_uuid, filesize__gte=min_size)\
.exclude(mime_type_uri__contains='application/pdf')\
.order_by('-filesize')[:100]
for med in meds:
print(med.uuid)
from opencontext_py.apps.imports.records.models import ImportCell
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.mediafiles.models import Mediafile
project_uuid = '02b55e8c-e9b1-49e5-8edf-0afeea10e2be'
source_id = 'ref:1669580990802'
sunit_field = 12
feature_field = 11
full_field = 15
med_cells = ImportCell.objects.filter(source_id=source_id, field_num=full_field)
for med_cell in med_cells:
feat_man = None
su_man = None
feat_cell = ImportCell.objects.filter(source_id=source_id, row_num=med_cell.row_num, field_num=feature_field)[:1][0]
su_cell = ImportCell.objects.filter(source_id=source_id, row_num=med_cell.row_num, field_num=sunit_field)[:1][0]
try:
feat_man = Manifest.objects.get(label=feat_cell.record, item_type='subjects', class_uri='oc-gen:cat-feature', project_uuid=project_uuid)
except:
pass
try:
su_man = Manifest.objects.get(label=su_cell.record, item_type='subjects', class_uri='oc-gen:cat-survey-unit', project_uuid=project_uuid)
except:
pass
full_uri = med_cell.record
media_f = Mediafile.objects.get(file_uri=full_uri, project_uuid=project_uuid)
if feat_man:
print('Adding Feature: {} : {}'.format(feat_man.uuid, media_f.uuid))
Assertion.objects.filter(uuid=feat_man.uuid, object_uuid=media_f.uuid).delete()
Assertion.objects.filter(object_uuid=feat_man.uuid, uuid=media_f.uuid).delete()
new_ass = Assertion()
new_ass.uuid = feat_man.uuid
new_ass.subject_type = feat_man.item_type
new_ass.project_uuid = feat_man.project_uuid
new_ass.source_id = source_id + '-fix'
new_ass.obs_node = '#obs-' + str(1)
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = 'oc-3'
new_ass.object_type = 'media'
new_ass.object_uuid = media_f.uuid
new_ass.save()
new_ass = Assertion()
new_ass.uuid = media_f.uuid
new_ass.subject_type = 'media'
new_ass.project_uuid = project_uuid
new_ass.source_id = source_id + '-fix'
new_ass.obs_node = '#obs-' + str(1)
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = 'oc-3'
new_ass.object_type = feat_man.item_type
new_ass.object_uuid = feat_man.uuid
new_ass.save()
if su_man:
print('Adding Survey Unit: {} : {}'.format(su_man.uuid, media_f.uuid))
Assertion.objects.filter(uuid=su_man.uuid, object_uuid=media_f.uuid).delete()
Assertion.objects.filter(object_uuid=su_man.uuid, uuid=media_f.uuid).delete()
new_ass = Assertion()
new_ass.uuid = su_man.uuid
new_ass.subject_type = su_man.item_type
new_ass.project_uuid = su_man.project_uuid
new_ass.source_id = source_id + '-fix'
new_ass.obs_node = '#obs-' + str(1)
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = 'oc-3'
new_ass.object_type = 'media'
new_ass.object_uuid = media_f.uuid
new_ass.save()
new_ass = Assertion()
new_ass.uuid = media_f.uuid
new_ass.subject_type = 'media'
new_ass.project_uuid = project_uuid
new_ass.source_id = source_id + '-fix'
new_ass.obs_node = '#obs-' + str(1)
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = 'oc-3'
new_ass.object_type = su_man.item_type
new_ass.object_uuid = su_man.uuid
new_ass.save()
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.mediafiles.models import Mediafile
project_uuid = '02b55e8c-e9b1-49e5-8edf-0afeea10e2be'
source_id = 'ref:1669580990802'
m_mans = Manifest.objects.filter(project_uuid=project_uuid, source_id=source_id, item_type='media')
for m_man in m_mans:
a_chk = Assertion.objects.filter(subject_type='subjects', object_uuid=m_man.uuid)[:1]
if len(a_chk) > 0:
continue
if len(a_chk) == 0:
print('Delete! {} has {} subject links'.format(m_man.uuid, len(a_chk)))
Mediafile.objects.filter(uuid=m_man.uuid).delete()
Assertion.objects.filter(uuid=m_man.uuid).delete()
Assertion.objects.filter(object_uuid=m_man.uuid).delete()
m_man.delete()
sources = [
('trap-geo-yambal',
'Survey Unit ',
'SUID',
'yam-survey-units-reproj-w-uuids-clustered.geojson',
'yam-survey-units-clustered-w-uris.geojson'),
('trap-geo-kazanlak',
'Survey Unit ',
'SUID',
'kaz-survey-units-reproj-w-uuids-best-clustered.geojson',
'kaz-survey-units-clustered-w-uris.geojson'),
('trap-geo-yambal-groups',
'S.U. Group Y',
'lon-lat-cluster',
'yam-clustered.geojson',
'yam-clustered-w-uris.geojson'),
('trap-geo-kazanlak-groups',
'S.U. Group K',
'lon-lat-cluster',
'kaz-clustered.geojson',
'kaz-clustered-w-uris.geojson')
]
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.imports.geojson.geojson import GeoJSONimport
from opencontext_py.apps.ocitems.geospace.models import Geospace
project_uuid = '24e2aa20-59e6-4d66-948b-50ee245a7cfc'
sources = [
('trap-geo-yambal-groups',
'S.U. Group Y',
'lon-lat-cluster',
'yam-clustered.geojson',
'yam-clustered-w-uris.geojson'),
('trap-geo-kazanlak-groups',
'S.U. Group K',
'lon-lat-cluster',
'kaz-clustered.geojson',
'kaz-clustered-w-uris.geojson')
]
for source_id, prefix, id_prop, old_file, new_file in sources:
Geospace.objects\
.filter(source_id=source_id,
project_uuid=project_uuid,
ftype__in=['Polygon', 'Multipolygon']).delete()
gimp = GeoJSONimport()
gimp.load_into_importer = False
gimp.project_uuid = project_uuid
gimp.source_id = source_id
json_obj = gimp.load_json_file('trap-geo', old_file)
rev_json = LastUpdatedOrderedDict()
rev_json['type'] = 'FeatureCollection'
rev_json['features'] = []
for feat in json_obj['features']:
suid = feat['properties'][id_prop]
label = prefix + str(suid)
print('Find {}'.format(label))
man_obj = Manifest.objects.get(label=label, project_uuid=project_uuid, item_type='subjects')
feat['properties']['uri'] = 'http://opencontext.org/subjects/' + man_obj.uuid
if 'uuid' in feat['properties']:
feat['properties'].pop('uuid')
rev_json['features'].append(feat)
print('{} is {}'.format(man_obj.label, man_obj.uuid))
gimp.save_json_file(rev_json, 'trap-geo', new_file)
gimp.process_features_in_file('trap-geo', new_file)
from opencontext_py.apps.ocitems.assertions.models import Assertion
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ocitems.predicates.models import Predicate
project_uuid = 'a52bd40a-9ac8-4160-a9b0-bd2795079203'
predicate_uuid = 'fc335a0d-42e0-42ae-bb11-0ef46ec048e8'
pred = Manifest.objects.get(uuid=predicate_uuid)
mans = Manifest.objects.filter(project_uuid=project_uuid, item_type='media')
pers = Manifest.objects.get(uuid='0dcda4ad-812b-484f-ad70-3613d063cf52') # Kevin
pm = Predicate.objects.get(uuid=predicate_uuid)
for man_obj in mans:
Assertion.objects.filter(uuid=man_obj.uuid, object_type='persons').delete()
new_ass = Assertion()
new_ass.uuid = man_obj.uuid
new_ass.subject_type = man_obj.item_type
new_ass.project_uuid = man_obj.project_uuid
new_ass.source_id = 'kevin-contributor'
new_ass.obs_node = '#obs-' + str(1)
new_ass.obs_num = 1
new_ass.sort = 1
new_ass.visibility = 1
new_ass.predicate_uuid = predicate_uuid
new_ass.object_type = pers.item_type
new_ass.object_uuid = pers.uuid
new_ass.save()
| gpl-3.0 |
hugobowne/scikit-learn | examples/gaussian_process/plot_gpc_xor.py | 104 | 2132 | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                           linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
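# Optional follow-up, not part of the original example: a quick numerical comparison of the
# two kernels on the training data, reusing X, Y and `kernels` defined above.
for kernel in kernels:
    clf = GaussianProcessClassifier(kernel=kernel).fit(X, Y)
    print("%s: train accuracy %.3f, log-marginal-likelihood %.3f"
          % (clf.kernel_, clf.score(X, Y),
             clf.log_marginal_likelihood(clf.kernel_.theta)))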
| bsd-3-clause |
RTHMaK/RPGOne | Documents/sklearn-stub-master/doc/conf.py | 2 | 9365 | # -*- coding: utf-8 -*-
#
# sklearn-stub documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'numpydoc',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'gen_rst'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sklearn-stub'
copyright = u'2016, Vighnesh Birodkar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sklearn-stubdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'sklearn-stub.tex', u'sklearn-stub Documentation',
u'Vighnesh Birodkar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sklearn-stub', u'sklearn-stub Documentation',
[u'Vighnesh Birodkar'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sklearn-stub', u'sklearn-stub Documentation',
u'Vighnesh Birodkar', 'sklearn-stub', 'One line description of project.',
'Miscellaneous'),
]
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
app.connect('autodoc-process-docstring', generate_example_rst)
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| apache-2.0 |
johnwu93/find_best_mall | recomendation system/recsys.py | 3 | 4621 | import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
class recsys(object):
#X is the truth
def __init__(self,X):
self.X = X
self.X_predict = None
self.X_train = None
pass
#get the necessary helper functions to do an analysis. may require more parameters for derived classes
def get_helpers(self, feature_func = None, similarity_func = None):
if ( not(feature_func is None) or (self.feature_helper is None)):
self.feature_helper = feature_func;
if ( not(similarity_func is None) or (self.similarity_helper is None)):
self.similarity_helper = similarity_func;
def similarity(self, features, similarity_helper, cluster, k):
        # creates an N-by-N matrix where the (i, j) entry tells how the ith person is related to the jth person; each column refers to one person
        # this matrix is NOT SYMMETRIC
        # input
        # features - feature matrix used to compare the objects, with one row per person (N people)
        # similarity_helper - pairwise metric passed to pairwise_distances to compare two feature vectors
S=pairwise_distances(features, metric=similarity_helper)
#S = self.similarity_helper(W)
S = S-np.diag(S.diagonal())
#modifies S for cluster information
cluster_ind = np.array([cluster]*features.shape[0])
S = np.multiply(S, 1*(cluster_ind == cluster_ind.T))
#implement the neighborbased part. This is for better results. Get top K similar people for each user.
        S = np.apply_along_axis(find_top_k, 0, S, k=k)  # keep only each column's top similarities; assigning the result makes the filtering explicit (can be slow for this model)
S_norm =np.multiply(S, 1/np.sum(S, axis=0)) #fast multiplication
S_norm[np.isnan(S_norm)]=0 #deals with nan problem. (Consider the instance that you are the only user and nobody is similar to you.)
return S_norm
def get_parameters(self, **kwargs):
pass
#this varies from learner to learner. Some learners do not have this because they do not have parameters to be tuned
def get_parameters_2(self, **kwargs):
pass
def predict_for_user(self, user_ratings, user_feat, k, feature_transform_all =None):
        # output: predicted indices of the stores that are most liked by a user
        # feature_transform_all transforms the user into a more appropriate feature representation
        # makes a prediction for the user
        # for matrix factorization, preprocessing must be done first; specifically, user_feat and feat must already be defined
pass
def transform_training(self, train_indices, test_indices):
#Uses the information of the training and testing indices to transform X for training purposes
#train_incides must be a |Train_Data|-by-2 matrix.
#train_indices come in tuples
self.X_train = np.copy(self.X);
if((test_indices is None) and (train_indices is None) ):
return
elif(not (test_indices is None)):
self.X_train[test_indices[:, 0], test_indices[:, 1]] = np.zeros((1, test_indices.shape[0]))
return
else:
#create a binary matrix that
Nitems, Nusers = self.X.shape
test_indicator = np.ones((Nitems, Nusers))
test_indicator[train_indices[:, 0], train_indices[:, 1]] = np.zeros((1, train_indices.shape[0]))
self.X_train[test_indicator == 1] = 0
def fit(self, train_indices = "None", test_indices = "None"):
pass
        # the actual fitting code is implemented by the derived learners
        # in reality, this would not be used a lot
def predict(self, indices):
if(not isinstance(indices, np.ndarray)):
raise Exception("Dawg, your indices have to be an ndarray")
        return self.X_predict[indices[:, 0], indices[:, 1]]
def score(self, truth_index):
if(not isinstance(truth_index, np.ndarray)):
raise Exception("Dawg, your testing indices have to be an ndarray")
return self.score_helper(self.X, self.X_predict, truth_index)
def get_helper2(self, name, function):
if(name == 'feature_helper'):
self.feature_helper = function
return
if(name == 'similarity_helper'):
self.similarity_helper = function
return
if(name == 'score_helper'):
self.score_helper = function
return
else:
raise Exception("Cannot find feature function corresponding to the input name")
def find_top_k(x, k):
#return an array where anything less than the top k values of an array is zero
if( np.count_nonzero(x) <k):
return x
else:
x[x < -1*np.partition(-1*x, k)[k]] = 0
return x
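# Minimal usage sketch (added for illustration, not in the original module). Note that the
# cutoff is the (k+1)-th largest value, only entries strictly below it are zeroed, and the
# input array is modified in place:
# >>> find_top_k(np.array([0.1, 0.9, 0.3, 0.7, 0.2]), k=2)
# array([0. , 0.9, 0.3, 0.7, 0. ])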
| mit |
frank-tancf/scikit-learn | sklearn/utils/tests/test_validation.py | 56 | 18600 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted,
check_consistent_length,
)
from sklearn.exceptions import NotFittedError
from sklearn.exceptions import DataConversionWarning
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
    # don't allow more than 2 dimensions by default (a 3-D array raises unless allow_nd=True)
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
    check_array(X_nan, force_all_finite=False)  # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2D array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check that ValueError is raised when a non-estimator instance is passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
zymsys/sms-tools | lectures/05-Sinusoidal-model/plots-code/spectral-peaks-interpolation.py | 22 | 1234 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
from scipy.fftpack import fft, ifft
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9.5, 5.5))
plt.subplot (2,1,1)
plt.plot(freqaxis,mX,'r', lw=1.5)
plt.axis([300,2500,-70,max(mX)])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + spectral peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis,pX,'c', lw=1.5)
plt.axis([300,2500,min(pX),-1])
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + spectral peaks')
plt.tight_layout()
plt.savefig('spectral-peaks-interpolation.png')
plt.show()
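# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original script: UF.peakInterp above is
# assumed to perform the standard three-point parabolic fit around each
# detected peak bin (the exact implementation lives in
# software/models/utilFunctions.py). The helper below spells out that idea.
def parabolic_peak_sketch(mX, pX, ploc):
    lval = mX[ploc-1]                                     # magnitude left of each peak bin
    val = mX[ploc]                                        # magnitude at each peak bin
    rval = mX[ploc+1]                                     # magnitude right of each peak bin
    iploc = ploc + 0.5*(lval-rval)/(lval - 2*val + rval)  # refined (fractional) bin location
    ipmag = val - 0.25*(lval-rval)*(iploc-ploc)           # interpolated peak magnitude
    ipphase = np.interp(iploc, np.arange(pX.size), pX)    # phase read off at the refined bins
    return iploc, ipmag, ipphase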
| agpl-3.0 |
cainiaocome/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
e-q/scipy | scipy/ndimage/filters.py | 5 | 52471 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections.abc import Iterable
import warnings
import numpy
import operator
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
def _invalid_origin(origin, lenw):
return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a 1-D correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
1-D sequence of numbers.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = normalize_axis_index(axis, input.ndim)
if _invalid_origin(origin, len(weights)):
raise ValueError('Invalid origin; origin must satisfy '
'-(len(weights) // 2) <= origin <= '
'(len(weights)-1) // 2')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return output
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a 1-D convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
1-D sequence of numbers.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
return correlate1d(input, weights, axis, output, mode, cval, origin)
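# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: for an odd-length
# kernel the body above simply reverses the weights and delegates to
# correlate1d, so convolution and correlation with reversed weights agree.
def _demo_convolve1d_vs_correlate1d():
    x = numpy.array([2, 8, 0, 4, 1, 9, 9, 0])
    w = numpy.array([1, 3, 5])
    assert numpy.array_equal(convolve1d(x, w), correlate1d(x, w[::-1]))
    return convolve1d(x, w)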
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1-D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
exponent_range = numpy.arange(order + 1)
sigma2 = sigma * sigma
x = numpy.arange(-radius, radius+1)
phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
phi_x = phi_x / phi_x.sum()
if order == 0:
return phi_x
else:
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = numpy.zeros(order + 1)
q[0] = 1
D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
for _ in range(order):
q = Q_deriv.dot(q)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
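# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: for order=1 the
# recurrence above reduces to q(x) = -x / sigma**2, so the returned kernel is
# the analytic first derivative of the normalized Gaussian. The check below
# makes that explicit.
def _demo_gaussian_kernel1d_order1(sigma=2.0, radius=8):
    x = numpy.arange(-radius, radius + 1)
    phi = _gaussian_kernel1d(sigma, 0, radius)       # plain Gaussian kernel
    dphi = _gaussian_kernel1d(sigma, 1, radius)      # first-derivative kernel
    assert numpy.allclose(dphi, -x / sigma**2 * phi)
    return dphi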
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""1-D Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : int, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. A positive order corresponds to convolution with
that derivative of a Gaussian.
%(output)s
%(mode_reflect)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
Examples
--------
>>> from scipy.ndimage import gaussian_filter1d
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
>>> import matplotlib.pyplot as plt
>>> np.random.seed(280490)
>>> x = np.random.randn(101).cumsum()
>>> y3 = gaussian_filter1d(x, 3)
>>> y6 = gaussian_filter1d(x, 6)
>>> plt.plot(x, 'k', label='original data')
>>> plt.plot(y3, '--', label='filtered, sigma=3')
>>> plt.plot(y6, ':', label='filtered, sigma=6')
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
# Since we are calling correlate, not convolve, revert the kernel
weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
return correlate1d(input, weights, axis, output, mode, cval, 0)
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : int or sequence of ints, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian.
%(output)s
%(mode_multiple)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
>>> from scipy import misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = gaussian_filter(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order, mode in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return output
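# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: as stated in the Notes
# above, the N-D Gaussian filter is a sequence of 1-D filters, so filtering
# each axis by hand reproduces gaussian_filter.
def _demo_gaussian_filter_separability(sigma=1.5):
    a = numpy.arange(50, dtype=numpy.float64).reshape(5, 10)
    expected = gaussian_filter(a, sigma)
    by_hand = gaussian_filter1d(gaussian_filter1d(a, sigma, axis=0), sigma, axis=1)
    assert numpy.allclose(expected, by_hand)
    return expected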
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
return output
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""
N-D Laplace filter using a provided second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative2(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-D Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
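# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the [1, -2, 1] stencil
# used above is the discrete second difference, so applying laplace to a
# quadratic ramp returns the constant second derivative away from the edges.
def _demo_laplace_second_difference():
    a = numpy.arange(10, dtype=numpy.float64) ** 2   # f(i) = i**2
    out = laplace(a)
    assert numpy.allclose(out[1:-1], 2.0)            # f''(i) = 2 in the interior
    return out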
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using Gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Returns
-------
gaussian_gradient_magnitude : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if _invalid_origin(origin, lenw):
raise ValueError('Invalid origin; origin must satisfy '
'-(weights.shape[k] // 2) <= origin[k] <= '
'(weights.shape[k]-1) // 2')
if not weights.flags.contiguous:
weights = weights.copy()
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError("A sequence of modes is not supported")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
%(input)s
weights : ndarray
array of weights, same number of dimensions as input
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
result : ndarray
The result of correlation of `input` with `weights`.
See Also
--------
convolve : Convolve an image with a kernel.
Examples
--------
Correlation is the process of moving a filter mask, often referred to
as a kernel, over the image and computing the sum of products at each location.
>>> from scipy.ndimage import correlate
>>> input_img = np.arange(25).reshape(5,5)
>>> print(input_img)
[[ 0 1 2 3 4]
[ 5 6 7 8 9]
[10 11 12 13 14]
[15 16 17 18 19]
[20 21 22 23 24]]
Define a kernel (weights) for correlation. In this example, it sums the
center element and its up, down, left and right neighbors.
>>> weights = [[0, 1, 0],
... [1, 1, 1],
... [0, 1, 0]]
We can calculate a correlation result:
For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.
>>> correlate(input_img, weights)
array([[ 6, 10, 15, 20, 24],
[ 26, 30, 35, 40, 44],
[ 51, 55, 60, 65, 69],
[ 76, 80, 85, 90, 94],
[ 96, 100, 105, 110, 114]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
%(input)s
weights : array_like
Array of weights, same number of dimensions as input
%(output)s
%(mode_reflect)s
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
%(origin_multiple)s
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the N-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
because in this case borders (i.e., where the `weights` kernel, centered
on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
With ``mode='nearest'``, the single value nearest to an edge in
`input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import uniform_filter1d
>>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
return output
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multidimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
uniform_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.uniform_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin, mode in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import minimum_filter1d
>>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([2, 0, 0, 0, 1, 1, 0, 0])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return output
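# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the O(n) behaviour
# quoted in the Notes comes from a monotonic-queue style algorithm. The toy
# version below computes a trailing (causal) window minimum with a deque; it is
# not equivalent to minimum_filter1d, which centers the window and extends the
# boundaries, but it shows why the cost is linear in the input length.
def _demo_sliding_minimum(values, size):
    from collections import deque
    window = deque()          # indices of candidate minima, values increasing
    result = []
    for i, v in enumerate(values):
        while window and values[window[-1]] >= v:
            window.pop()      # dominated candidates can never be the minimum
        window.append(i)
        if window[0] <= i - size:
            window.popleft()  # oldest candidate fell out of the window
        if i >= size - 1:
            result.append(values[window[0]])
    return result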
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import maximum_filter1d
>>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([8, 8, 8, 4, 9, 9, 9, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return output
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint, dtype=bool)
if not footprint.any():
raise ValueError("All-zero footprint is not supported.")
if footprint.all():
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin, mode in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError(
"A sequence of modes is not supported for non-separable "
"footprints")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
if temp_needed:
temp[...] = output
output = temp
return output
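# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: an all-True footprint
# is detected above (footprint.all()) and rerouted through the separable 1-D
# filters, so it must agree with the plain size= form.
def _demo_full_footprint_is_separable():
    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
    by_size = maximum_filter(a, size=3)
    by_footprint = maximum_filter(a, footprint=numpy.ones((3, 3), dtype=bool))
    assert numpy.array_equal(by_size, by_footprint)
    return by_size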
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
minimum_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
A sequence of modes (one per axis) is only supported when the footprint is
separable. Otherwise, a single mode string must be provided.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.minimum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
maximum_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
A sequence of modes (one per axis) is only supported when the footprint is
separable. Otherwise, a single mode string must be provided.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.maximum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError(
"A sequence of modes is not supported by non-separable rank "
"filters")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional rank filter.
Parameters
----------
%(input)s
rank : int
The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
rank_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.rank_filter(ascent, rank=42, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
rank = operator.index(rank)
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculate a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
median_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.median_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
percentile_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
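# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: median_filter and
# percentile_filter only translate their request into a rank inside
# _rank_filter (a 3x3 footprint has filter_size 9, median rank 9 // 2 = 4), so
# the three public filters agree on that footprint.
def _demo_rank_median_percentile_agree():
    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
    med = median_filter(a, size=3)
    assert numpy.array_equal(med, rank_filter(a, 4, size=3))
    assert numpy.array_equal(med, percentile_filter(a, 50, size=3))
    return med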
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a 1-D filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
given function at each line. The arguments passed to the function are
the input line and the output line. The input and output lines are 1-D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int function(double *input_line, npy_intp input_length,
double *output_line, npy_intp output_length,
void *user_data)
int function(double *input_line, intptr_t input_length,
double *output_line, intptr_t output_length,
void *user_data)
The calling function iterates over the lines of the input and output
arrays, calling the callback function at each line. The current line
is extended according to the border conditions set by the calling
function, and the result is copied into the array that is passed
through ``input_line``. The length of the input line (after extension)
is passed through ``input_length``. The callback function should apply
the filter and store the result in the array passed through
``output_line``. The length of the output line is passed through
``output_length``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = normalize_axis_index(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments,
extra_keywords)
return output
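# Illustrative usage sketch (not part of the SciPy source): a pure-Python callable
# passed to `generic_filter1d` receives the border-extended input line and must
# write its result into `output_line` in-place. The boxcar-average callable and
# the sample array below are examples only.
def _example_generic_filter1d():
    def running_mean(input_line, output_line):
        # the extended input line is longer than the output by filter_size - 1
        size = input_line.size - output_line.size + 1
        for i in range(output_line.size):
            output_line[i] = input_line[i:i + size].mean()
    a = numpy.arange(12, dtype=float).reshape(3, 4)
    return generic_filter1d(a, running_mean, filter_size=3, axis=-1)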
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a multidimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1-D array of double values.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int callback(double *buffer, npy_intp filter_size,
double *return_value, void *user_data)
int callback(double *buffer, intptr_t filter_size,
double *return_value, void *user_data)
The calling function iterates over the elements of the input and
output arrays, calling the callback function at each element. The
elements within the footprint of the filter at the current element are
passed through the ``buffer`` parameter, and the number of elements
within the footprint through ``filter_size``. The calculated value is
returned in ``return_value``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return output
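# Illustrative usage sketch (not part of the SciPy source): `generic_filter` hands the
# values under the footprint to the callable as a flat 1-D double array; the local
# peak-to-peak filter over a 3x3 window below is an example only.
def _example_generic_filter():
    a = numpy.arange(25, dtype=float).reshape(5, 5)
    return generic_filter(a, lambda values: values.max() - values.min(), size=3)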
| bsd-3-clause |
clingsz/GAE | immuAnalysis/clustering.py | 1 | 2611 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 14:19:29 2017
@author: cling
"""
from sklearn.cluster import AgglomerativeClustering as ag
import numpy
import matplotlib.pyplot as plt
import misc.gap as gap
###########################
# Clustering main methods
###########################
def gap_cluster(x):
best_k = gap.fit_gap_stats(x,bootstraps=100,kMin=1,kMax=100)
cids,counts,mses = ag_clust(x,best_k)
return (cids,counts,mses)
def ag_clust(x,k):
a = ag(n_clusters=k)
a.fit(x)
lbs = a.labels_
cids = []
for i in range(k):
lst = numpy.where(lbs==i)[0]
cids.append(lst)
counts,mses = analyze_cluster(x,cids)
lst = get_order(mses)
mses = reorder(mses,lst)
cids = reorder(cids,lst)
counts = reorder(counts,lst)
show_cluster(x,cids,mses)
return cids,counts,mses
def analyze_cluster(x,cids):
counts = []
mses = []
for i in range(len(cids)):
y = x[cids[i],:]
ym = numpy.mean(y,axis=0)
ym = numpy.reshape(ym,[1,len(ym)])
ya = numpy.repeat(ym,y.shape[0],axis=0)
m = numpy.mean((y - ya)**2)
        print(i, y.shape[0], m)
counts.append(y.shape[0])
mses.append(m)
return counts,mses
def reorder(A,lst):
B = []
for i in range(len(A)):
B.append(A[lst[i]])
return B
def get_order(metric):
ke = metric
lst = sorted(range(len(ke)),key=lambda x:ke[x])
return lst
####################################
# Clustering visualization methods
####################################
def draw_bound(bounds,x):
nb = -0.5
pos = []
for b in bounds:
pos.append((nb+nb+b)/2)
nb += b
    plt.plot([nb,nb],[-1,x.shape[0]],'k--',markersize=10)
plt.xlim([0-0.5,x.shape[1]-0.5])
plt.ylim([0-0.5,x.shape[0]-0.5])
return pos
def show_cluster(x,cids=None,mses=None,obNames=None):
if cids is None:
cids = [[i for i in range(x.shape[0])]]
K = len(cids)
x = x.transpose()
bounds = []
for i in range(K):
bounds.append(len(cids[i]))
plt.figure(figsize=[15,15])
plt.imshow(x,aspect='auto',interpolation='none',vmax=3,vmin=-3,cmap='PRGn')
plt.colorbar()
if obNames is not None:
plt.yticks(range(len(obNames)),obNames)
pos = draw_bound(bounds,x)
if mses is not None:
mses = numpy.round(mses,decimals=2)
plt.xticks(pos,mses,rotation='vertical')
plt.xlabel('MSEs')
############## test #################
def test():
# x = numpy.random.randn(100,8)
x = gap.init_board_gauss(200,5)
gap_cluster(x)
| gpl-3.0 |
Gorbagzog/StageIAP | Plot_results.py | 1 | 1139 | #!/usr/bin/env python3
# -*-coding:Utf-8 -*
"""Load and plot best fit parameters estimated from MCMC output"""
import numpy as np
import matplotlib.pyplot as plt
def load_results(directory):
results = np.loadtxt(directory + '/Results.txt', skiprows=2).astype('float')
results = results[results[:, 0].argsort()] # sort the array by redshift
return results
def plot_one(directory, results, idx_result, result_label):
redshiftsbinTrue = np.array([0.37, 0.668, 0.938, 1.286, 1.735, 2.220, 2.683, 3.271, 3.926, 4.803])
errm = results[:, idx_result+1] - results[:, idx_result+2]
errp = results[:, idx_result+3] - results[:, idx_result+1]
plt.figure()
plt.errorbar(redshiftsbinTrue[:], results[:, idx_result+1], yerr=[errm, errp])
plt.ylabel(result_label, size=20)
plt.xlabel('Redshift')
plt.savefig(directory + "/Plots/Result_" + result_label + '.pdf')
plt.close()
def plot_all(directory):
    labels = [r'$M_{1}$', r'$M_{*,0}$', r'$\beta$', r'$\delta$', r'$\gamma$', r'$\xi$']
results = load_results(directory)
for i in range(6):
plot_one(directory, results, 3*i, labels[i])
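# Minimal usage sketch (illustrative): 'mcmc_output' is a hypothetical directory that
# contains Results.txt and a Plots/ subdirectory, matching the layout assumed above.
#
#     if __name__ == '__main__':
#         plot_all('mcmc_output')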
| gpl-3.0 |
igolan/word2vec | show_tsne.py | 1 | 5963 | #!/usr/bin/env python
from struct import calcsize, pack, unpack
import numpy as np
import sys
import matplotlib.pyplot as plt
def _read_unpack(fmt, fh):
return unpack(fmt, fh.read(calcsize(fmt)))
def get_bh_tsne_res():
# Read and pass on the results
res = []
with open(result_filename + '.tsne.dat', 'rb') as output_file:
# The first two integers are just the number of samples and the
# dimensionality
result_samples, result_dims = _read_unpack('ii', output_file)
# Collect the results, but they may be out of order
results = [_read_unpack('{}d'.format(result_dims), output_file)
for _ in range(result_samples)]
# Now collect the landmark data so that we can return the data in
# the order it arrived
results = [(_read_unpack('i', output_file), e) for e in results]
# Put the results in order and yield it
results.sort()
for _, result in results:
sample_res = []
for r in result:
sample_res.append(r)
res.append(sample_res)
#yield result
# The last piece of data is the cost for each sample, we ignore it
#read_unpack('{}d'.format(sample_count), output_file)
return (result_samples, result_dims, np.asarray(res, dtype='float64'))
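# Illustrative sketch (not part of the original tool): the .tsne.dat layout expected by
# get_bh_tsne_res() can be reproduced for testing with struct.pack -- a header, then one
# block of coordinates per sample, then one order index per sample. 'fake.tsne.dat' and
# the values below are made up.
#
#     with open('fake.tsne.dat', 'wb') as fh:
#         fh.write(pack('ii', 2, 2))        # n_samples, n_dims
#         fh.write(pack('2d', 0.1, 0.2))    # coordinates of sample 0
#         fh.write(pack('2d', 0.3, 0.4))    # coordinates of sample 1
#         fh.write(pack('i', 0))            # order index of sample 0
#         fh.write(pack('i', 1))            # order index of sample 1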
def parse_tsne_res():
    # note: get_bh_tsne_res() returns (n_samples, n_dims, results); unpack the
    # results array before iterating over the per-sample rows
    res = []
    _, _, bh_res = get_bh_tsne_res()
    for result in bh_res:
        sample_res = []
        for r in result:
            sample_res.append(r)
        res.append(sample_res)
    return np.asarray(res, dtype='float64')
def get_words_dict():
words_to_line_dict = {}
line_to_word_dict = {}
with open(result_filename + '.vec.txt','r') as f:
line_num = 0
for line in f:
words_to_line_dict[line.split(None, 1)[0]] = line_num
line_to_word_dict[line_num] = line.split(None, 1)[0]
line_num += 1
return (words_to_line_dict,line_to_word_dict)
def get_tsne_matrix():
(result_samples, result_dims, tsne_res) = get_bh_tsne_res()
tsne_matrix = np.zeros([result_samples,result_dims])
for samp_id in range(0,len(tsne_res)):
for dim in range(0,result_dims):
tsne_matrix[samp_id][dim] = tsne_res[samp_id][dim]
# Print warning if we have point at (0,0) - something might went wrong with the TSNE or its' parsing
for row in range(0,tsne_matrix.shape[0]):
if tsne_matrix[row][0] == 0 and tsne_matrix[row][1] == 0:
print("Note! the point in row " + str(row) + " is (0,0), make sure no points are missing! (result_samples=" + str(result_samples) + ", result_dims=" + str(result_dims) + ")")
# Normalize to 1
for dim in range(0,result_dims):
tsne_matrix[:,dim] -= tsne_matrix[:,dim].min()
tsne_matrix[:,dim] /= tsne_matrix[:,dim].max()
print("Loaded " + str(tsne_matrix.shape[0]) + " words")
return tsne_matrix
# def drawAnnote(self, ax, x, y, annote):
# """
# Draw the annotation on the plot
# """
# if (x, y) in self.drawnAnnotations:
# markers = self.drawnAnnotations[(x, y)]
# for m in markers:
# m.set_visible(not m.get_visible())
# self.ax.figure.canvas.draw_idle()
# else:
# t = ax.text(x, y, " - %s" % (annote),)
# m = ax.scatter([x], [y], marker='d', c='r', zorder=100)
# self.drawnAnnotations[(x, y)] = (t, m)
# self.ax.figure.canvas.draw_idle()
def onpick3(event):
ind = event.ind
x = np.take(tsne_matrix[:,0], ind)
y = np.take(tsne_matrix[:,1], ind)
#ax.annotate('annoaaatate', xy=(x, y), xytext=(x+0.1, y+0.1), arrowprops=dict(facecolor='black', shrink=0.05))
#t = plt.text(np.take(tsne_matrix[:,0], ind), np.take(tsne_matrix[:,1], " - %s" % "hey"))
print("--------")
for indx in ind:
print("Clicked on word " + line_to_word_dict[indx] + " (index=" + str(indx) + ") , location=(" + str(pretty_float(tsne_matrix[indx][0])) + "," + str(pretty_float(tsne_matrix[
indx][1])) + ")")
def pretty_float(flt):
return "%0.2f" % flt
# ax1 = fig.add_subplot(111)
# col = ax1.scatter(x, y, 100*s, c, picker=True)
# #fig.savefig('pscoll.eps')
# fig.canvas.mpl_connect('pick_event', onpick3)
fig = plt.figure()
ax = fig.add_subplot(111)
def show_tsne_matrix(tsne_matrix,words_to_line_dict):
colors_options="brcgmyk"
pnts_colors = [colors_options[0] for i in range(0,tsne_matrix[:,0].__len__())]
with open('tsne_group1.txt','r') as f:
for line in f:
word=line.split(None, 1)[0]
if word in words_to_line_dict.keys():
pnts_colors[words_to_line_dict[word]] = colors_options[1]
print("Detected " + str(word) + " on index " + str(words_to_line_dict[word]) + " , location=(" + str(pretty_float(tsne_matrix[words_to_line_dict[word]][0])) + "," + str(pretty_float(tsne_matrix[words_to_line_dict[word]][1])) + ")")
ax.scatter(tsne_matrix[:,0], tsne_matrix[:,1], c=pnts_colors, picker=True) #, s=area, c=colors, alpha=0.5)
plt.xlim(0, 1)
plt.ylim(0, 1)
#ax.annotate('annotate', xy=(0.5, 0.5), xytext=(0.5+0.2, 0.5+0.2), arrowprops=dict(facecolor='black', shrink=0.05))
fig.canvas.mpl_connect('pick_event', onpick3)
plt.show()
if __name__ == '__main__':
if sys.argv.__len__() < 2:
print("Usage: ./show_tsne <result_file_name> , for example: ./show_tsne results_amazon ** Without .vec extension!")
print("Marked words are saved at tsne_group1.txt")
exit()
result_filename = sys.argv[1]
(words_to_line_dict,line_to_word_dict) = get_words_dict()
tsne_matrix = get_tsne_matrix()
show_tsne_matrix(tsne_matrix,words_to_line_dict)
#exit()
| apache-2.0 |
fyffyt/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
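# Minimal usage sketch (illustrative, not one of scikit-learn's own examples):
# `discretize` rounds a (n_samples, n_clusters) eigenvector embedding into hard labels.
# The random matrix below is only a stand-in for the output of `spectral_embedding`.
def _example_discretize():
    rng = np.random.RandomState(0)
    fake_embedding = rng.rand(20, 3)
    return discretize(fake_embedding, random_state=rng)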
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
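# Minimal usage sketch (illustrative, not one of scikit-learn's own examples): cluster
# two concentric circles through a symmetrized k-nearest-neighbours affinity. The
# dataset and parameter values are arbitrary.
def _example_spectral_clustering():
    from sklearn.datasets import make_circles
    X, _ = make_circles(n_samples=200, factor=0.3, noise=0.05, random_state=0)
    connectivity = kneighbors_graph(X, n_neighbors=10, include_self=True)
    affinity = 0.5 * (connectivity + connectivity.T)  # symmetrize the graph
    return spectral_clustering(affinity, n_clusters=2, random_state=0)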
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
curiousguy13/shogun | examples/undocumented/python_modular/graphical/regression_gaussian_process_demo.py | 10 | 9249 | ###########################################################################
# Mean prediction from Gaussian Processes based on
# classifier_libsvm_minimal_modular.py
# plotting functions have been adapted from the pyGP library
# https://github.com/jameshensman/pyGP
###########################################################################
from numpy import *
from numpy.random import randn
from modshogun import *
import pylab as PL
import matplotlib
import logging as LG
import scipy as SP
from modshogun import GradientModelSelection
from modshogun import ModelSelectionParameters, R_EXP, R_LINEAR
from modshogun import ParameterCombination
def plot_training_data(x, y,
shift=None,
replicate_indices=None,
format_data={'alpha':.5,
'marker':'.',
'linestyle':'--',
'lw':1,
'markersize':9},
draw_arrows=0,
plot_old=False):
"""
Plot training data input x and output y into the
active figure (See http://matplotlib.sourceforge.net/ for details of figure).
Instance plot without replicate groups:
.. image:: ../images/plotTraining.png
:height: 8cm
    Instance plot with two replicate groups and a shift in x-coords:
.. image:: ../images/plotTrainingShiftX.png
:height: 8cm
**Parameters:**
x : [double]
Input x (e.g. time).
y : [double]
Output y (e.g. expression).
shift : [double]
The shift of each replicate group.
replicate_indices : [int]
        Indices of replicates for each x, respectively
format_data : {format}
Format of the data points. See http://matplotlib.sourceforge.net/ for details.
draw_arrows : int
        Draw the given number of arrows (if greater than len(replicate), draw all arrows).
Arrows will show the time shift for time points, respectively.
"""
x_shift = SP.array(x.copy())
if shift is not None and replicate_indices is not None:
assert len(shift) == len(SP.unique(replicate_indices)), 'Need one shift per replicate to plot properly'
_format_data = format_data.copy()
        if 'alpha' in format_data:
_format_data['alpha'] = .2*format_data['alpha']
else:
_format_data['alpha'] = .2
number_of_groups = len(SP.unique(replicate_indices))
for i in SP.unique(replicate_indices):
x_shift[replicate_indices == i] -= shift[i]
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
_format_data['color'] = col
if(plot_old):
PL.plot(x[replicate_indices == i], y[replicate_indices == i], **_format_data)
if(draw_arrows):
range = SP.where(replicate_indices == i)[0]
for n in SP.arange(range[0], range[-1], max(1, round(len(range) / draw_arrows))):
offset = round((len(range)-1) / draw_arrows)
n += max(int((i+1)*offset/number_of_groups),1)
PL.text((x_shift[n]+x[n])/2., y[n],
"%.2f"%(-shift[i]),
ha='center',va='center',
fontsize=10)
PL.annotate('', xy=(x_shift[n], y[n]),
xytext=(x[n], y[n]),va='center',
arrowprops=dict(facecolor=col,
alpha=.2,
shrink=.01,
frac=.2,
headwidth=11,
width=11))
#PL.plot(x,y,**_format_data)
if(replicate_indices is not None):
number_of_groups = len(SP.unique(replicate_indices))
#format_data['markersize'] = 13
#format_data['alpha'] = .5
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x_shift[replicate_indices == i], y[replicate_indices == i], **format_data)
else:
print(x_shift.shape)
number_of_groups = x_shift.shape[0]
        for i in range(number_of_groups):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x[i], y[i], **format_data)
# return PL.plot(x_shift,y,**format_data)
def plot_sausage(X, mean, std, alpha=None, format_fill={'alpha':0.3, 'facecolor':'k'}, format_line=dict(alpha=1, color='g', lw=3, ls='dashed')):
"""
    Plot sausage plot of GP, i.e.:
.. image:: ../images/sausage.png
:height: 8cm
**returns:** : [fill_plot, line_plot]
The fill and the line of the sausage plot. (i.e. green line and gray fill of the example above)
**Parameters:**
X : [double]
        Interval X for which the sausage shall be plotted.
mean : [double]
        The mean to be plotted.
std : [double]
Pointwise standard deviation.
format_fill : {format}
The format of the fill. See http://matplotlib.sourceforge.net/ for details.
format_line : {format}
The format of the mean line. See http://matplotlib.sourceforge.net/ for details.
"""
X = X.squeeze()
Y1 = (mean + 2 * std)
Y2 = (mean - 2 * std)
if(alpha is not None):
old_alpha_fill = min(1, format_fill['alpha'] * 2)
for i, a in enumerate(alpha[:-2]):
format_fill['alpha'] = a * old_alpha_fill
hf = PL.fill_between(X[i:i + 2], Y1[i:i + 2], Y2[i:i + 2], lw=0, **format_fill)
i += 1
hf = PL.fill_between(X[i:], Y1[i:], Y2[i:], lw=0, **format_fill)
else:
hf = PL.fill_between(X, Y1, Y2, **format_fill)
hp = PL.plot(X, mean, **format_line)
return [hf, hp]
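# Illustrative call (synthetic values, not part of the original demo): fill the
# mean +/- 2*std band and overlay the mean line.
#
# X = SP.linspace(0, 2*SP.pi, 50)
# plot_sausage(X, SP.sin(X), 0.1*SP.ones(50))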
class CrossRect(matplotlib.patches.Rectangle):
def __init__(self, *args, **kwargs):
matplotlib.patches.Rectangle.__init__(self, *args, **kwargs)
#self.ax = ax
# def get_verts(self):
# rectverts = matplotlib.patches.Rectangle.get_verts(self)
# return verts
def get_path(self, *args, **kwargs):
old_path = matplotlib.patches.Rectangle.get_path(self)
verts = []
codes = []
for vert, code in old_path.iter_segments():
verts.append(vert)
codes.append(code)
verts.append([1, 1])
codes.append(old_path.LINETO)
new_path = matplotlib.artist.Path(verts, codes)
return new_path
def create_toy_data():
#0. generate Toy-Data; just samples from a superposition of a sin + linear trend
xmin = 1
xmax = 2.5*SP.pi
x = SP.arange(xmin,xmax,(xmax-xmin)/100.0)
C = 2 #offset
sigma = 0.5
b = 0
y = b*x + C + 1*SP.sin(x)
# dy = b + 1*SP.cos(x)
y += sigma*random.randn(y.shape[0])
y-= y.mean()
x = x[:,SP.newaxis]
return [x,y]
def run_demo():
LG.basicConfig(level=LG.INFO)
random.seed(572)
#1. create toy data
[x,y] = create_toy_data()
feat_train = RealFeatures(transpose(x));
labels = RegressionLabels(y);
n_dimensions = 1
    #2. location of uniformly spaced predictions
X = SP.linspace(0,10,10)[:,SP.newaxis]
    #new interface with likelihood parameters being decoupled from the covariance function
likelihood = GaussianLikelihood()
covar_parms = SP.log([2])
hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
#construct covariance function
SECF = GaussianKernel(feat_train, feat_train,2)
covar = SECF
zmean = ZeroMean();
inf = ExactInferenceMethod(SECF, feat_train, zmean, labels, likelihood);
gp = GaussianProcessRegression(inf, feat_train, labels);
root=ModelSelectionParameters();
c1=ModelSelectionParameters("inference_method", inf);
root.append_child(c1);
c2 = ModelSelectionParameters("scale");
c1.append_child(c2);
c2.build_values(0.01, 4.0, R_LINEAR);
c3 = ModelSelectionParameters("likelihood_model", likelihood);
c1.append_child(c3);
c4=ModelSelectionParameters("sigma");
c3.append_child(c4);
c4.build_values(0.001, 4.0, R_LINEAR);
c5 =ModelSelectionParameters("kernel", SECF);
c1.append_child(c5);
c6 =ModelSelectionParameters("width");
c5.append_child(c6);
c6.build_values(0.001, 4.0, R_LINEAR);
crit = GradientCriterion();
grad=GradientEvaluation(gp, feat_train, labels,
crit);
grad.set_function(inf);
gp.print_modsel_params();
root.print_tree();
grad_search=GradientModelSelection(
root, grad);
grad.set_autolock(0);
best_combination=grad_search.select_model(1);
gp.set_return_type(GaussianProcessRegression.GP_RETURN_COV);
St = gp.apply_regression(feat_train);
St = St.get_labels();
gp.set_return_type(GaussianProcessRegression.GP_RETURN_MEANS);
M = gp.apply_regression();
M = M.get_labels();
#create plots
plot_sausage(transpose(x),transpose(M),transpose(SP.sqrt(St)));
plot_training_data(x,y);
PL.show();
if __name__ == '__main__':
run_demo()
| gpl-3.0 |
kelle/astropy | astropy/visualization/lupton_rgb.py | 4 | 12745 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004).
The three images must be aligned and have the same pixel scale and size.
For details, see : http://adsabs.harvard.edu/abs/2004PASP..116..133L
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from . import ZScaleInterval
__all__ = ['make_lupton_rgb']
def compute_intensity(image_r, image_g=None, image_b=None):
"""
Return a naive total intensity from the red, blue, and green intensities.
Parameters
----------
image_r : `~numpy.ndarray`
Intensity of image to be mapped to red; or total intensity if ``image_g``
and ``image_b`` are None.
image_g : `~numpy.ndarray`, optional
Intensity of image to be mapped to green.
image_b : `~numpy.ndarray`, optional
Intensity of image to be mapped to blue.
Returns
-------
intensity : `~numpy.ndarray`
Total intensity from the red, blue and green intensities, or ``image_r``
if green and blue images are not provided.
"""
if image_g is None or image_b is None:
if not (image_g is None and image_b is None):
raise ValueError("please specify either a single image "
"or red, green, and blue images.")
return image_r
intensity = (image_r + image_g + image_b)/3.0
# Repack into whatever type was passed to us
return np.asarray(intensity, dtype=image_r.dtype)
class Mapping(object):
"""
Baseclass to map red, blue, green intensities into uint8 values.
Parameters
----------
minimum : float or sequence(3)
Intensity that should be mapped to black (a scalar or array for R, G, B).
image : `~numpy.ndarray`, optional
An image used to calculate some parameters of some mappings.
"""
def __init__(self, minimum=None, image=None):
self._uint8Max = float(np.iinfo(np.uint8).max)
try:
len(minimum)
except TypeError:
minimum = 3*[minimum]
if len(minimum) != 3:
raise ValueError("please provide 1 or 3 values for minimum.")
self.minimum = minimum
self._image = np.asarray(image)
def make_rgb_image(self, image_r, image_g, image_b):
"""
Convert 3 arrays, image_r, image_g, and image_b into an 8-bit RGB image.
Parameters
----------
image_r : `~numpy.ndarray`
Image to map to red.
image_g : `~numpy.ndarray`
Image to map to green.
image_b : `~numpy.ndarray`
Image to map to blue.
Returns
-------
RGBimage : `~numpy.ndarray`
RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
"""
image_r = np.asarray(image_r)
image_g = np.asarray(image_g)
image_b = np.asarray(image_b)
if (image_r.shape != image_g.shape) or (image_g.shape != image_b.shape):
msg = "The image shapes must match. r: {}, g: {} b: {}"
raise ValueError(msg.format(image_r.shape, image_g.shape, image_b.shape))
return np.dstack(self._convert_images_to_uint8(image_r, image_g, image_b)).astype(np.uint8)
def intensity(self, image_r, image_g, image_b):
"""
Return the total intensity from the red, blue, and green intensities.
This is a naive computation, and may be overridden by subclasses.
Parameters
----------
image_r : `~numpy.ndarray`
Intensity of image to be mapped to red; or total intensity if
``image_g`` and ``image_b`` are None.
image_g : `~numpy.ndarray`, optional
Intensity of image to be mapped to green.
image_b : `~numpy.ndarray`, optional
Intensity of image to be mapped to blue.
Returns
-------
intensity : `~numpy.ndarray`
Total intensity from the red, blue and green intensities, or
``image_r`` if green and blue images are not provided.
"""
return compute_intensity(image_r, image_g, image_b)
def map_intensity_to_uint8(self, I):
"""
Return an array which, when multiplied by an image, returns that image
mapped to the range of a uint8, [0, 255] (but not converted to uint8).
The intensity is assumed to have had minimum subtracted (as that can be
done per-band).
Parameters
----------
I : `~numpy.ndarray`
Intensity to be mapped.
Returns
-------
mapped_I : `~numpy.ndarray`
``I`` mapped to uint8
"""
with np.errstate(invalid='ignore', divide='ignore'):
return np.clip(I, 0, self._uint8Max)
def _convert_images_to_uint8(self, image_r, image_g, image_b):
"""Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images"""
image_r = image_r - self.minimum[0] # n.b. makes copy
image_g = image_g - self.minimum[1]
image_b = image_b - self.minimum[2]
fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b))
image_rgb = [image_r, image_g, image_b]
for c in image_rgb:
c *= fac
c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't
pixmax = self._uint8Max
r0, g0, b0 = image_rgb # copies -- could work row by row to minimise memory usage
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
for i, c in enumerate(image_rgb):
c = np.where(r0 > g0,
np.where(r0 > b0,
np.where(r0 >= pixmax, c*pixmax/r0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c)),
np.where(g0 > b0,
np.where(g0 >= pixmax, c*pixmax/g0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8)
c[c > pixmax] = pixmax
image_rgb[i] = c
return image_rgb
class LinearMapping(Mapping):
"""
    A linear map of red, blue, green intensities into uint8 values.
A linear stretch from [minimum, maximum].
If one or both are omitted use image min and/or max to set them.
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
maximum : float
Intensity that should be mapped to white (a scalar).
"""
def __init__(self, minimum=None, maximum=None, image=None):
if minimum is None or maximum is None:
if image is None:
raise ValueError("you must provide an image if you don't "
"set both minimum and maximum")
if minimum is None:
minimum = image.min()
if maximum is None:
maximum = image.max()
Mapping.__init__(self, minimum=minimum, image=image)
self.maximum = maximum
if maximum is None:
self._range = None
else:
if maximum == minimum:
raise ValueError("minimum and maximum values must not be equal")
self._range = float(maximum - minimum)
def map_intensity_to_uint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0,
np.where(I >= self._range, self._uint8Max/I, self._uint8Max/self._range))
class AsinhMapping(Mapping):
"""
A mapping for an asinh stretch (preserving colours independent of brightness)
x = asinh(Q (I - minimum)/stretch)/Q
This reduces to a linear stretch if Q == 0
See http://adsabs.harvard.edu/abs/2004PASP..116..133L
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
stretch : float
The linear stretch of the image.
Q : float
The asinh softening parameter.
"""
def __init__(self, minimum, stretch, Q=8):
Mapping.__init__(self, minimum)
epsilon = 1.0/2**23 # 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit
if abs(Q) < epsilon:
Q = 0.1
else:
Qmax = 1e10
if Q > Qmax:
Q = Qmax
frac = 0.1 # gradient estimated using frac*stretch is _slope
self._slope = frac*self._uint8Max/np.arcsinh(frac*Q)
self._soften = Q/float(stretch)
def map_intensity_to_uint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0, np.arcsinh(I*self._soften)*self._slope/I)
class AsinhZScaleMapping(AsinhMapping):
"""
A mapping for an asinh stretch, estimating the linear stretch by zscale.
x = asinh(Q (I - z1)/(z2 - z1))/Q
Parameters
----------
image1 : `~numpy.ndarray` or a list of arrays
The image to analyse, or a list of 3 images to be converted to
an intensity image.
image2 : `~numpy.ndarray`, optional
the second image to analyse (must be specified with image3).
image3 : `~numpy.ndarray`, optional
the third image to analyse (must be specified with image2).
Q : float, optional
The asinh softening parameter. Default is 8.
pedestal : float or sequence(3), optional
The value, or array of 3 values, to subtract from the images; or None.
Notes
-----
pedestal, if not None, is removed from the images when calculating the
zscale stretch, and added back into Mapping.minimum[]
"""
def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None):
"""
"""
if image2 is None or image3 is None:
if not (image2 is None and image3 is None):
raise ValueError("please specify either a single image "
"or three images.")
image = [image1]
else:
image = [image1, image2, image3]
if pedestal is not None:
try:
len(pedestal)
except TypeError:
pedestal = 3*[pedestal]
if len(pedestal) != 3:
raise ValueError("please provide 1 or 3 pedestals.")
image = list(image) # needs to be mutable
for i, im in enumerate(image):
if pedestal[i] != 0.0:
image[i] = im - pedestal[i] # n.b. a copy
else:
pedestal = len(image)*[0.0]
image = compute_intensity(*image)
zscale_limits = ZScaleInterval().get_limits(image)
zscale = LinearMapping(*zscale_limits, image=image)
stretch = zscale.maximum - zscale.minimum[0] # zscale.minimum is always a triple
minimum = zscale.minimum
for i, level in enumerate(pedestal):
minimum[i] += level
AsinhMapping.__init__(self, minimum, stretch, Q)
self._image = image
def make_lupton_rgb(image_r, image_g, image_b, minimum=0, stretch=5, Q=8,
filename=None):
"""
Return a Red/Green/Blue color image from up to 3 images using an asinh stretch.
The input images can be int or float, and in any range or bit-depth.
For a more detailed look at the use of this method, see the document
:ref:`astropy-visualization-rgb`.
Parameters
----------
image_r : `~numpy.ndarray`
Image to map to red.
image_g : `~numpy.ndarray`
Image to map to green.
image_b : `~numpy.ndarray`
Image to map to blue.
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
stretch : float
The linear stretch of the image.
Q : float
The asinh softening parameter.
filename: str
Write the resulting RGB image to a file (file type determined
from extension).
Returns
-------
rgb : `~numpy.ndarray`
RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array.
"""
asinhMap = AsinhMapping(minimum, stretch, Q)
rgb = asinhMap.make_rgb_image(image_r, image_g, image_b)
if filename:
import matplotlib.image
matplotlib.image.imsave(filename, rgb, origin='lower')
return rgb
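# Minimal usage sketch (illustrative): random arrays stand in for three aligned images
# of equal shape, and the stretch/Q values are arbitrary.
def _example_make_lupton_rgb():
    rng = np.random.RandomState(0)
    image_r = rng.random_sample((100, 100))
    image_g = rng.random_sample((100, 100))
    image_b = rng.random_sample((100, 100))
    return make_lupton_rgb(image_r, image_g, image_b, stretch=0.5, Q=8)  # (100, 100, 3) uint8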
| bsd-3-clause |
flightgong/scikit-learn | sklearn/tests/test_random_projection.py | 5 | 13190 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import (
johnson_lindenstrauss_min_dim,
gaussian_random_matrix,
sparse_random_matrix,
SparseRandomProjection,
GaussianRandomProjection)
from sklearn.utils.testing import (
assert_less,
assert_raises,
assert_raise_message,
assert_array_equal,
assert_equal,
assert_almost_equal,
assert_in,
assert_array_almost_equal)
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
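# Illustrative check (not one of the original tests): for the data size used below
# (n_samples=10) and eps=0.5, the lemma gives a minimum safe embedding dimension of
# 110, the value asserted in test_correct_RandomProjection_dimensions_embedding.
def _example_jl_min_dim():
    return johnson_lindenstrauss_min_dim(n_samples=10, eps=0.5)  # -> 110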
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
"""Check some statical properties of Gaussian random matrix"""
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
"""Check some statical properties of sparse random matrix"""
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution.
        # Let's say that each element a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
RandomProjection(n_components=n_features + 1).fit(data)
assert_equal(len(w), 1)
assert issubclass(w[-1].category, UserWarning)
| bsd-3-clause |
timestocome/Test-stock-prediction-algorithms | Curves, Markov and Bayes/LevelData.py | 2 | 3693 |
# http://github.com/timestocome
# Level data so series is stationary in time
# take log of data
# save it to use in deconstructing signal to find anomalies
# Using finance.yahoo.com Nasdaq, S&P, DJI 1985 - date (Nov 29 2017)
#
# https://blog.statsbot.co/time-series-anomaly-detection-algorithms-1cef5519aef2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# pandas display options
pd.options.display.max_rows = 10000
pd.options.display.max_columns = 25
pd.options.display.width = 1000
######################################################################
# plot dataframe
########################################################################
def plot_dataframe(d, t):
plt.figure(figsize=(18,18))
plt.plot(d['NASDAQ'], label='NASDAQ')
plt.plot(d['S&P'], label='S&P')
plt.plot(d['DJIA'], label='DJIA')
plt.plot(d['BTC'], label='BTC')
plt.plot(d['Russell'], label='Russell')
plt.title(t)
plt.legend(loc='best')
plt.show()
######################################################################
# data
########################################################################
# read in datafile created in LoadAndMatchDates.py
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = ['DJIA', 'S&P', 'NASDAQ', 'Russell', 'BTC']
# fill in a couple NaN
#data.dropna()
data = data.fillna(method='ffill')
#########################################################################################
# level the series out, time series calculations all assume signal is stationary in time
########################################################################################
# pandas removed ols package !#&^*@$
# need y intercept, b
# and slope, m
# y = mx + b
# using simplest case possible
#
# how to get x, y just in case you want to put this into an ordinary least squares package
# for better slope/intercept numbers
# This is close enough for proof of concept
#
# x = list(range(1, len(data)))
# y = data
# not really ols, but close enough
def ols(data):
m = (data[-1] - data[0]) / len(data)
b = data[0]
print(data[-1], data[0], (data[-1] - data[0]))
print(m, b)
print('-----------------------')
return m, b
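# Optional sketch (added; not wired into the pipeline below): a proper least-squares fit
# of the same line via np.polyfit, as the comments above suggest. Swap it in for ols()
# if you want a better slope/intercept than the endpoint estimate.
def ols_polyfit(data):
    x = np.arange(len(data))
    # np.polyfit with degree 1 returns [slope, intercept]
    m, b = np.polyfit(x, np.asarray(data, dtype=float), 1)
    return m, b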
# add a time step
steps = np.asarray(range(1, len(data)+1))
steps.reshape(1, -1)
data['step'] = steps
# NASDAQ
data['log NASDAQ'] = np.log(data['NASDAQ'])
m, b = ols(data['log NASDAQ'])
data['leveled log Nasdaq'] = data['log NASDAQ'] - (b + data['step'] * m)
# S&P
data['log S&P'] = np.log(data['S&P'])
m, b = ols(data['log S&P'])
data['leveled log S&P'] = data['log S&P'] - (b + data['step'] * m)
# DJIA
data['log DJIA'] = np.log(data['DJIA'])
m, b = ols(data['log DJIA'])
data['leveled log DJIA'] = data['log DJIA'] - (b + data['step'] * m)
# BTC
data['log BTC'] = np.log(data['BTC'])
m, b = ols(data['log BTC'])
data['leveled log BTC'] = data['log BTC'] - (b + data['step'] * m)
# Russell
data['log Russell'] = np.log(data['Russell'])
m, b = ols(data['log Russell'])
data['leveled log Russell'] = data['log Russell'] - (b + data['step'] * m)
#print(data.columns.values)
data = data[['leveled log Nasdaq','leveled log S&P', 'leveled log DJIA', 'leveled log Russell', 'leveled log BTC']]
# save data
data.to_csv('LeveledLogStockData.csv')
# plot to make sure things look ok
plt.figure(figsize=(12,12))
plt.plot(data['leveled log Nasdaq'], label='NASDAQ')
plt.plot(data['leveled log S&P'], label='S&P')
plt.plot(data['leveled log DJIA'], label='DJIA')
plt.plot(data['leveled log BTC'], label='BTC')
plt.plot(data['leveled log Russell'], label='Russell')
plt.legend(loc='best')
plt.show()
| mit |
ThomasSweijen/TPF | examples/simple-scene/simple-scene-plot.py | 8 | 2026 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=.2,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
### Plot data is collected periodically by the PyRunner below: here every 20 iterations (iterPeriod=20).
### realTimeLim and virtTimeLim are unset, hence wall-clock and virtual-time periods are not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. z_sph as function of t on the left y-axis, then (after the None separator, on the right y-axis) v_sph (as green circles connected with line, 'go-') and z_sph_half, again as functions of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(2./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
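## e.g. (sketch): after the run completes you could inspect or export the collected series:
##   print len(plot.data['t']), plot.data['z_sph'][:5]
##   plot.saveDataTxt('/tmp/simple-scene-plot.txt')  # if your YADE build provides plot.saveDataTxt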
| gpl-2.0 |
fberanizo/author-profiling | classifiers/evaluation.py | 1 | 3924 | # -*- coding: utf-8 -*-
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.combine import SMOTEENN, SMOTETomek
class Evaluation:
"""Classifier evaluator."""
def __init__(self, sampler="random_under_sampler"):
self.sampler = sampler
def run(self, classifier, param_grid, X, y):
"""Performs classifier evaluation."""
print('Evaluating ' + type(classifier).__name__)
# Split the dataset in train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
sampler = self.get_sampler()
X_train, y_train = sampler.fit_sample(X_train, y_train)
scores = ['f1'] if len(set(y_test)) <= 2 else ['f1_weighted']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print("")
skf = StratifiedKFold(n_splits=5)
clf = GridSearchCV(estimator=classifier, param_grid=param_grid, scoring=score, cv=skf, verbose=0, n_jobs=2)
clf.fit(X_train, y_train)
print("Grid scores on validation set:")
print("")
results = dict(filter(lambda i: i[0] in ["params", "mean_test_score", "std_test_score", "rank_test_score"], clf.cv_results_.items()))  # cv_results_ uses mean_test_score/std_test_score/rank_test_score keys
table = dict()
for key, val in results.items():
table[key] = val
print(table)
print("Best parameters set found on validation set:")
print("")
print(clf.best_params_)
print("")
print("")
print("Scores on test set (using best parameters):")
print("")
y_true, y_pred = y_test, clf.predict(X_test)
target_names = list(map(str, np.unique(y_true).tolist()))
print(classification_report(y_true, y_pred))
avg_accuracy = accuracy_score(y_true, y_pred)
print("")
print("Average accuracy on test set (using best parameters): %.2f" % avg_accuracy)
print("")
(precision, recall, f1_score, support) = precision_recall_fscore_support(y_true, y_pred)
print("===================================================================")
print(precision)
print("===================================================================")
average = 'binary' if len(set(y)) <= 2 else 'weighted'
(avg_precision, avg_recall, avg_f1_score, avg_support) = precision_recall_fscore_support(y_true, y_pred, average=average)
accuracy = []
for target in target_names:
accuracy.append(accuracy_score(y_true[np.where(y_true == np.int_(target))[0]], y_pred[np.where(y_true == np.int_(target))[0]]))
target_names.append('avg')
accuracy = np.append(accuracy, avg_accuracy)
precision = np.append(precision, avg_precision)
recall = np.append(recall, avg_recall)
f1_score = np.append(f1_score, avg_f1_score)
return target_names, accuracy, precision, recall, f1_score
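# Usage sketch (illustrative only; SVC and the tiny grid are assumptions, not part of this module):
#   from sklearn.svm import SVC
#   ev = Evaluation(sampler="SMOTE")
#   names, acc, prec, rec, f1 = ev.run(SVC(), {"C": [1, 10]}, X, y)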
def get_sampler(self):
"""Returns sampler method."""
if self.sampler == "random_under_sampler":
return RandomUnderSampler()
if self.sampler == "random_over_sampler":
return RandomOverSampler()
elif self.sampler == "SMOTE":
return SMOTE()
elif self.sampler == "SMOTEENN":
return SMOTEENN()
elif self.sampler == "SMOTETomek":
return SMOTETomek()
return None | bsd-2-clause |
warmspringwinds/scikit-image | skimage/viewer/plugins/color_histogram.py | 5 | 3279 | import numpy as np
import matplotlib.pyplot as plt
from ... import color, exposure
from .plotplugin import PlotPlugin
from ..canvastools import RectangleTool
class ColorHistogram(PlotPlugin):
name = 'Color Histogram'
def __init__(self, max_pct=0.99, **kwargs):
super(ColorHistogram, self).__init__(height=400, **kwargs)
self.max_pct = max_pct
print(self.help())
def attach(self, image_viewer):
super(ColorHistogram, self).attach(image_viewer)
self.rect_tool = RectangleTool(image_viewer,
on_release=self.ab_selected)
self._on_new_image(image_viewer.image)
def _on_new_image(self, image):
self.lab_image = color.rgb2lab(image)
# Calculate color histogram in the Lab colorspace:
L, a, b = self.lab_image.T
left, right = -100, 100
ab_extents = [left, right, right, left]
self.mask = np.ones(L.shape, bool)
bins = np.arange(left, right)
hist, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(),
bins, normed=True)
self.data = {'bins': bins, 'hist': hist, 'edges': (x_edges, y_edges),
'extents': (left, right, left, right)}
# Clip bin heights that dominate a-b histogram
max_val = pct_total_area(hist, percentile=self.max_pct)
hist = exposure.rescale_intensity(hist, in_range=(0, max_val))
self.ax.imshow(hist, extent=ab_extents, cmap=plt.cm.gray)
self.ax.set_title('Color Histogram')
self.ax.set_xlabel('b')
self.ax.set_ylabel('a')
def help(self):
helpstr = ("Color Histogram tool:",
"Select region of a-b colorspace to highlight on image.")
return '\n'.join(helpstr)
def ab_selected(self, extents):
x0, x1, y0, y1 = extents
self.data['extents'] = extents
lab_masked = self.lab_image.copy()
L, a, b = lab_masked.T
self.mask = ((a > y0) & (a < y1)) & ((b > x0) & (b < x1))
lab_masked[..., 1:][~self.mask.T] = 0
self.image_viewer.image = color.lab2rgb(lab_masked)
def output(self):
"""Return the image mask and the histogram data.
Returns
-------
mask : array of bool, same shape as image
The selected pixels.
data : dict
The data describing the histogram and the selected region.
The dictionary contains:
- 'bins' : array of float
The bin boundaries for both `a` and `b` channels.
- 'hist' : 2D array of float
The normalized histogram.
- 'edges' : tuple of array of float
The bin edges along each dimension
- 'extents' : tuple of float
The left and right and top and bottom of the selected region.
"""
return (self.mask, self.data)
def pct_total_area(image, percentile=0.80):
"""Return threshold value based on percentage of total area.
The specified fraction of pixels lies at or below the returned intensity threshold.
"""
idx = int((image.size - 1) * percentile)
sorted_pixels = np.sort(image.flat)
return sorted_pixels[idx]
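# Worked example (illustration only, not used by the plugin): for an image whose flattened
# values are 0..99, pct_total_area(np.arange(100), percentile=0.8) returns
# sorted_pixels[int(99 * 0.8)] == sorted_pixels[79] == 79.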
| bsd-3-clause |
ryfeus/lambda-packs | Shapely_numpy/source/numpy/core/tests/test_multiarray.py | 11 | 246923 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
import os
import gc
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_warns,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# itemsizes that are not a power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
# 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
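# (added check for the 32-bit case: 46340**2 == 2147395600 < 2**31 - 1 == 2147483647 < 46341**2 == 2147488281)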
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
# New in 1.12: This behavior changes in 1.13, test for dep warning
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
with assert_warns(FutureWarning):
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
self.assertRaises(ArithmeticError, a.prod)
self.assertRaises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The searchsorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
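# The compare functions place NaN after every finite value (and order complex
# NaNs last as well), so searching the array for its own elements should give
# arange on the left side and arange + 1 on the right side.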
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
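# The keys are given in descending order to force the search to reset its
# cached starting index: 6 and 5 map to len(a) == 5, while 4 maps to index 4
# on the left side and 5 on the right side.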
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(a)  # shuffle the data so the sorter computed below is non-trivial
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
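# Rolling a sorted range by half its length produces the adversarial pattern
# that degrades plain median-of-3 pivoting to O(n^2); the large sizes below
# would time out unless the median-of-medians fallback keeps selection linear.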
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
for row in d1:  # a bare map() call is lazy on Python 3 and would never shuffle
    np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[i + 1:, :]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
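# every requested kth position must have only strictly smaller values before
# it and only values >= d[k] from that position onward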
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
for row in d1:  # a bare map() call is lazy on Python 3 and would never shuffle
    np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6, 7, 3, 2, 9, 0]
p = np.argpartition(d, 1)
self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
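# a is the 2x2 identity, so every transpose/copy combination of the product
# must reproduce it; the transposed forms exercise the syrk shortcut and the
# copies exercise the general gemm path.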
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(np.dot(eaf, eaf), eaf)
assert_equal(np.dot(eaf.T, eaf), eaf)
assert_equal(np.dot(eaf, eaf.T), eaf)
assert_equal(np.dot(eaf.T, eaf.T), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(np.dot(ebf, ebf), eaf)
assert_equal(np.dot(ebf.T, ebf), eaf)
assert_equal(np.dot(ebf, ebf.T), eaf)
assert_equal(np.dot(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
np.dot(edf[::-1, :], edf.T),
np.dot(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf[:, ::-1], edf.T),
np.dot(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf, edf[::-1, :].T),
np.dot(edf, edf[::-1, :].T.copy())
)
assert_equal(
np.dot(edf, edf[:, ::-1].T),
np.dot(edf, edf[:, ::-1].T.copy())
)
assert_equal(
np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(np.dot(edf, edf.T), eddtf)
assert_equal(np.dot(edf.T, edf), edtdf)
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9; the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
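# a strided 1-d view is not contiguous, so ravel() must copy for every order
# and the result should own its data.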
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
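# numpy may reuse the buffer of a temporary operand (refcount 1) for the
# result; the checks below verify the values and that named arrays are never
# modified in place.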
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the return original should not be changed to an inplace operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning whether the refcount == 1 object is on the Python stack, to check
# that we were called directly from Python, is flawed: the object may still
# be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the return original should not be changed to an inplace operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
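# ops maps each forward special method to (reflected method, matching ufunc
# or None, whether an in-place variant exists).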
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
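# SomeClass: plain object with __numpy_ufunc__ plus Python operator hooks;
# SomeClass2: the same but also an ndarray subclass; SomeClass3 additionally
# defines its own __rsub__.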
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
if i < len(inputs):
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
if ('out' in kw) and (kw['out'] is not None):
kw['out'] = np.asarray(kw['out'])
r = func(*inputs, **kw)
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# Obj3 is subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that index is set appropriately, also if only an output
# is passed on (the latter is another regression test for github bug 4753)
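# CheckIndex.__numpy_ufunc__ simply returns i, the position of self among the
# ufunc arguments (inputs first, then outputs), which the assertions decode.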
class CheckIndex(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return i
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), 1)
assert_equal(np.sin(dummy, out=a), 1)
assert_equal(np.sin(dummy, out=(a,)), 1)
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), 1)
assert_equal(np.modf(dummy, None, a), 2)
assert_equal(np.modf(dummy, dummy, a), 2)
assert_equal(np.modf(dummy, out=a), 1)
assert_equal(np.modf(dummy, out=(a,)), 1)
assert_equal(np.modf(dummy, out=(a, None)), 1)
assert_equal(np.modf(dummy, out=(a, dummy)), 1)
assert_equal(np.modf(dummy, out=(None, a)), 2)
assert_equal(np.modf(dummy, out=(dummy, a)), 2)
assert_equal(np.modf(a, out=(dummy, a)), 0)
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), 2)
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), 2)
assert_equal(np.add(dummy, dummy, out=(a,)), 2)
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# regression test for github bug 4753
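# OutClass strips 'out' from the keywords, runs the ufunc itself and writes
# the result into the supplied output array, so A should end up holding the
# product in both the positional and the keyword form.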
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could possibly be relaxed (older versions allowed even the shapes above)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could be relaxed possibly (used to allow even the previous)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
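# Basic amin/amax behaviour on scalars, axis validation, and NaT skipping
# for datetime/timedelta arrays.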
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
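# ndarray.clip across type groups and byte orders, in place and out of place,
# plus record arrays, one-sided min/max clipping, and NaN passthrough.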
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(TestCase):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
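# np.putmask over the scalar type codes, mask-size validation, byte order,
# and structured (record) arrays. Uses nose-style 'yield' test generators.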
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
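# ndarray.take over the scalar type codes and the 'raise'/'clip'/'wrap'
# out-of-bounds modes.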
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
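# np.lexsort on numeric, datetime/timedelta, and object-dtype keys,
# plus axis validation (gh-7528).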
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(ValueError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file object,
# but for now just test that the proper errors are raised
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str() output carries less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unbuffered_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise io.IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
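# np.frombuffer round-trips for both byte orders and an empty buffer.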
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
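# Behaviour of the .flat iterator on writeable and non-writeable arrays,
# for both contiguous and discontiguous views.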
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is True)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
assert_(f.flags.updateifcopy is True)
assert_(f.base is self.b0)
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
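# Structured (record) dtypes: field renaming, duplicate/unicode/bytes field
# names across Python 2 and 3, multi-field indexing warnings, and hashing.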
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
# Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test that both unicode and 8-bit/byte strings can be used as field names
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
with suppress_warnings() as sup:
sup.filter(FutureWarning,
"Assignment between structured arrays.*")
sup.filter(FutureWarning,
"Numpy has detected that you .*")
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
(2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
(3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
(2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
# For <=1.12 a is not modified, but it will be in 1.13
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
# make sure views of a multi-field index warn too
c = np.zeros(3, dtype='i8,i8,i8')
assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
[FutureWarning])
# make sure assignment using a different dtype warns
a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
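# Thin module-level wrappers around the ndarray methods so TestStats can
# loop over mean/var/std uniformly.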
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition, as there are lots of places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
# This fails if the sum inside mean is done in float16 instead
# of float32.
assert _mean(np.ones(100000, dtype='float16')) == 1
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
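# np.vdot over real, complex, boolean, and object dtypes, including
# C/F-ordered and non-contiguous inputs.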
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
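# np.dot shape/value checks for matrix-matrix, matrix-vector, and
# vector-vector products, the 3-argument out= form, object arrays, and a
# regression test for the Accelerate-framework sgemv alignment issue.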
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
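# Concrete matmul test cases: np.matmul here, and the '@' operator via
# TestMatmulOperator below on Python 3.5+.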
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_inner_scalar_and_matrix(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
assert_equal(np.inner(arr, sca), desired)
assert_equal(np.inner(sca, arr), desired)
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or gives a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestAlen(TestCase):
def test_basic(self):
m = np.array([1, 2, 3])
self.assertEqual(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
self.assertEqual(np.alen(m), 2)
m = [1, 2, 3]
self.assertEqual(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
self.assertEqual(np.alen(m), 2)
def test_singleton(self):
self.assertEqual(np.alen(5), 1)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(TestCase):
def setUp(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
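# Integer codes for the padding modes understood by the C-level
# neighborhood-iterator test helpers (test_neighborhood_iterator and
# test_neighborhood_iterator_oob), which are assumed to be imported from
# numpy's compiled test-support module.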
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046, should not segfault, but raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cacher in ctors.c
a = np.zeros(1000)
del a
gc.collect()
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
    # See #7949. Don't use the "/" operator with the -3 switch, since Python
    # reports it as a DeprecationWarning.
if sys.version_info[0] < 3 and not sys.py3kwarning:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
if __name__ == "__main__":
run_module_suite()
| mit |
stoneflyop1/py_machine_learning | ch04/miss.py | 1 | 1048 | import pandas as pd
from io import StringIO
csv_data = '''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
10.0,11.0,12.0,'''
## for python2
# csv_data = unicode(csv_data)
df = pd.read_csv(StringIO(csv_data))
print('#'*60)
print('# show data with missing values')
print(df)
print('# isnull: ', df.isnull().sum()) # isnull() marks missing entries as True; sum() counts them per column
print(df.values) # get numpy array from dataframe values
print('#'*60)
print('# drop NaN row or col')
print('########## drop row:\r\n', df.dropna()) # axis=0
print('########## drop col:\r\n', df.dropna(axis=1))
print('########## drop all col is NaN:\r\n', df.dropna(how='all'))
print('########## drop with threshold:\r\n', df.dropna(thresh=4))
print('########## drop specific cols:\r\n', df.dropna(subset=['C']))
print('#'*60)
print('# mean impute missing values')
from sklearn.preprocessing import Imputer
imr = Imputer(missing_values="NaN", strategy="mean", axis=0)
imr = imr.fit(df)
imputed_data = imr.transform(df.values)
print(imputed_data)
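# Illustrative addition (not part of the original script): the same column-wise
# mean imputation can be done with pandas alone, without scikit-learn.
print('#'*60)
print('# pandas-only mean imputation')
print(df.fillna(df.mean()))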
| mit |
sdu-cfei/modest-py | examples/simple/simple_1param.py | 1 | 2449 | """
Copyright (c) 2017, University of Southern Denmark
All rights reserved.
This code is licensed under BSD 2-clause license.
See LICENSE file in the project root for license terms.
"""
import json
import os
import pandas as pd
from modestpy import Estimation
from modestpy.utilities.sysarch import get_sys_arch
if __name__ == "__main__":
"""
This file is supposed to be run from the root directory.
Otherwise the paths have to be corrected.
"""
# DATA PREPARATION ==============================================
# Resources
platform = get_sys_arch()
assert platform, 'Unsupported platform type!'
fmu_file = 'Simple2R1C_ic_' + platform + '.fmu'
fmu_path = os.path.join('examples', 'simple', 'resources', fmu_file)
inp_path = os.path.join('examples', 'simple', 'resources', 'inputs.csv')
ideal_path = os.path.join('examples', 'simple', 'resources', 'result.csv')
est_path = os.path.join('examples', 'simple', 'resources', 'est.json')
known_path = os.path.join('examples', 'simple', 'resources', 'known.json')
# Working directory
workdir = os.path.join('examples', 'simple', 'workdir')
if not os.path.exists(workdir):
os.mkdir(workdir)
assert os.path.exists(workdir), "Work directory does not exist"
# Load inputs
inp = pd.read_csv(inp_path).set_index('time')
# Load measurements (ideal results)
ideal = pd.read_csv(ideal_path).set_index('time')
# Load definition of estimated parameters (name, initial value, bounds)
with open(est_path) as f:
est = json.load(f)
del est['R1'] # We want to estimate only C
del est['R2'] # We want to estimate only C
# Load definition of known parameters (name, value)
with open(known_path) as f:
known = json.load(f)
known['R1'] = 0.1
known['R2'] = 0.25
# MODEL IDENTIFICATION ==========================================
session = Estimation(workdir, fmu_path, inp, known, est, ideal,
lp_n=2, lp_len=50000, lp_frame=(0, 50000),
vp=(0, 50000), ic_param={'Tstart': 'T'},
methods=('MODESTGA', 'PS'),
ps_opts={'maxiter': 500, 'tol': 1e-6},
scipy_opts={},
ftype='RMSE',
default_log=True, logfile='simple.log')
estimates = session.estimate()
err, res = session.validate()
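    # Illustrative addition (not part of the original example): report the
    # outcome. The exact structure of `estimates` and `err` depends on the
    # modestpy version, so treat this as a sketch only.
    print('Estimated parameters:\n{}'.format(estimates))
    print('Validation error: {}'.format(err))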
| bsd-2-clause |
Jorge-C/bipy | doc/sphinxext/numpydoc/numpydoc/docscrape_sphinx.py | 41 | 9437 | from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
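        # NOTE: the early return above makes the signature markup below
        # unreachable.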
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
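if __name__ == "__main__":
    # Minimal usage sketch (not part of the upstream module): render a
    # numpydoc-formatted docstring as Sphinx-ready reST. The example function
    # below is purely illustrative.
    def _example(x):
        """
        Add one to the input.

        Parameters
        ----------
        x : int
            Value to increment.

        Returns
        -------
        y : int
            The incremented value.
        """
        return x + 1

    print(get_doc_object(_example))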
| bsd-3-clause |
mwv/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
marcotcr/lime-experiments | explainers.py | 1 | 8763 | from abc import ABCMeta, abstractmethod
import numpy as np
import scipy as sp
from sklearn import linear_model
import sklearn.metrics.pairwise
###############################
## Random Explainer
###############################
class RandomExplainer:
def __init__(self):
pass
def reset(self):
pass
def explain_instance(self,
instance_vector,
label,
classifier,
num_features,
dataset):
nonzero = instance_vector.nonzero()[1]
explanation = np.random.choice(nonzero, num_features)
return [(x, 1) for x in explanation]
def explain(self,
train_vectors,
train_labels,
classifier,
num_features,
dataset):
i = np.random.randint(0, train_vectors.shape[0])
explanation = self.explain_instance(train_vectors[i], None, None,
num_features, dataset)
return i, explanation
###############################
## Standalone Explainers
###############################
def most_important_word(classifier, v, class_):
# Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.
max_index = 0
max_change = -1
orig = classifier.predict_proba(v)[0][class_]
for i in v.nonzero()[1]:
val = v[0,i]
v[0,i] = 0
pred = classifier.predict_proba(v)[0][class_]
change = orig - pred
if change > max_change:
max_change = change
max_index = i
v[0,i] = val
if max_change < 0:
return -1
return max_index
def explain_greedy(instance_vector,
label,
classifier,
num_features,
dataset=None):
explanation = []
z = instance_vector.copy()
while len(explanation) < num_features:
i = most_important_word(classifier, z, label)
if i == -1:
break
z[0,i] = 0
explanation.append(i)
return [(x, 1) for x in explanation]
def most_important_word_martens(predict_fn, v, class_):
# Returns the word w that moves P(Y) - P(Y|NOT w) the most for class Y.
max_index = 0
max_change = -1
orig = predict_fn(v)[0,class_]
for i in v.nonzero()[1]:
val = v[0,i]
v[0,i] = 0
pred = predict_fn(v)[0,class_]
change = orig - pred
if change > max_change:
max_change = change
max_index = i
v[0,i] = val
if max_change < 0:
return -1, max_change
return max_index, max_change
def explain_greedy_martens(instance_vector,
label,
predict_fn,
num_features,
dataset=None):
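    # Greedily pick the words whose removal most decreases P(label), stopping
    # once num_features words have been collected or the tracked class
    # probability for the perturbed document drops below 0.5.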
if not hasattr(predict_fn, '__call__'):
predict_fn = predict_fn.predict_proba
explanation = []
z = instance_vector.copy()
cur_score = predict_fn(instance_vector)[0, label]
while len(explanation) < num_features:
i, change = most_important_word_martens(predict_fn, z, label)
cur_score -= change
if i == -1:
break
explanation.append(i)
if cur_score < .5:
break
z[0,i] = 0
return [(x, 1) for x in explanation]
def data_labels_distances_mapping_text(x, classifier_fn, num_samples):
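    # Perturb a sparse text instance by randomly switching words off. Returns:
    #   data      - binary matrix marking which words are kept in each sample,
    #   labels    - classifier_fn outputs for each perturbed sample,
    #   distances - cosine distances (scaled by 100) to the original instance,
    #   mapping   - column indices of the original nonzero features.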
distance_fn = lambda x : sklearn.metrics.pairwise.cosine_distances(x[0],x)[0] * 100
features = x.nonzero()[1]
vals = np.array(x[x.nonzero()])[0]
doc_size = len(sp.sparse.find(x)[2])
sample = np.random.randint(1, doc_size, num_samples - 1)
data = np.zeros((num_samples, len(features)))
inverse_data = np.zeros((num_samples, len(features)))
data[0] = np.ones(doc_size)
inverse_data[0] = vals
features_range = range(len(features))
for i, s in enumerate(sample, start=1):
active = np.random.choice(features_range, s, replace=False)
data[i, active] = 1
for j in active:
inverse_data[i, j] = 1
sparse_inverse = sp.sparse.lil_matrix((inverse_data.shape[0], x.shape[1]))
sparse_inverse[:, features] = inverse_data
sparse_inverse = sp.sparse.csr_matrix(sparse_inverse)
mapping = features
labels = classifier_fn(sparse_inverse)
distances = distance_fn(sparse_inverse)
return data, labels, distances, mapping
# This is LIME
class GeneralizedLocalExplainer:
def __init__(self,
kernel_fn,
data_labels_distances_mapping_fn,
num_samples=5000,
lasso=True,
mean=None,
return_mean=False,
return_mapped=False,
lambda_=None,
verbose=True,
positive=False):
# Transform_classifier, transform_explainer,
# transform_explainer_to_classifier all take raw data in, whatever that is.
# perturb(x, num_samples) returns data (perturbed data in f'(x) form),
# inverse_data (perturbed data in x form) and mapping, where mapping is such
# that mapping[i] = j, where j is an index for x form.
# distance_fn takes raw data in. what we're calling raw data is just x
self.lambda_ = lambda_
self.kernel_fn = kernel_fn
self.data_labels_distances_mapping_fn = data_labels_distances_mapping_fn
self.num_samples = num_samples
self.lasso = lasso
self.mean = mean
        self.return_mapped = return_mapped
        self.return_mean = return_mean
        self.verbose = verbose
        self.positive = positive
def reset(self):
pass
def data_labels_distances_mapping(self, raw_data, classifier_fn):
data, labels, distances, mapping = self.data_labels_distances_mapping_fn(raw_data, classifier_fn, self.num_samples)
return data, labels, distances, mapping
def generate_lars_path(self, weighted_data, weighted_labels):
X = weighted_data
alphas, active, coefs = linear_model.lars_path(X, weighted_labels, method='lasso', verbose=False, positive=self.positive)
return alphas, coefs
def explain_instance_with_data(self, data, labels, distances, label, num_features):
weights = self.kernel_fn(distances)
weighted_data = data * weights[:, np.newaxis]
if self.mean is None:
mean = np.mean(labels[:, label])
else:
mean = self.mean
shifted_labels = labels[:, label] - mean
if self.verbose:
print 'mean', mean
weighted_labels = shifted_labels * weights
used_features = range(weighted_data.shape[1])
nonzero = used_features
alpha = 1
if self.lambda_:
classif = linear_model.Lasso(alpha=self.lambda_, fit_intercept=False, positive=self.positive)
classif.fit(weighted_data, weighted_labels)
used_features = classif.coef_.nonzero()[0]
if used_features.shape[0] == 0:
if self.return_mean:
return [], mean
else:
return []
elif self.lasso:
alphas, coefs = self.generate_lars_path(weighted_data, weighted_labels)
for i in range(len(coefs.T) - 1, 0, -1):
nonzero = coefs.T[i].nonzero()[0]
if len(nonzero) <= num_features:
chosen_coefs = coefs.T[i]
alpha = alphas[i]
break
used_features = nonzero
debiased_model = linear_model.Ridge(alpha=0, fit_intercept=False)
debiased_model.fit(weighted_data[:, used_features], weighted_labels)
if self.verbose:
print 'Prediction_local', debiased_model.predict(data[0, used_features].reshape(1, -1)) + mean, 'Right:', labels[0, label]
if self.return_mean:
return sorted(zip(used_features,
debiased_model.coef_),
key=lambda x:np.abs(x[1]), reverse=True), mean
else:
return sorted(zip(used_features,
debiased_model.coef_),
key=lambda x:np.abs(x[1]), reverse=True)
def explain_instance(self,
raw_data,
label,
classifier_fn,
num_features, dataset=None):
if not hasattr(classifier_fn, '__call__'):
classifier_fn = classifier_fn.predict_proba
data, labels, distances, mapping = self.data_labels_distances_mapping(raw_data, classifier_fn)
if self.return_mapped:
if self.return_mean:
exp, mean = self.explain_instance_with_data(data, labels, distances, label, num_features)
else:
exp = self.explain_instance_with_data(data, labels, distances, label, num_features)
exp = [(mapping[x[0]], x[1]) for x in exp]
if self.return_mean:
return exp, mean
else:
return exp
return self.explain_instance_with_data(data, labels, distances, label, num_features), mapping
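if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original
    # experiments): explain a toy text classifier with the LIME-style explainer
    # above. The corpus, labels and kernel width are made up for demonstration.
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.linear_model import LogisticRegression
    docs = ['good great fun', 'great happy good', 'bad awful boring',
            'awful sad bad', 'good fun happy', 'boring sad awful']
    targets = [1, 1, 0, 0, 1, 0]
    vectorizer = CountVectorizer()
    train = vectorizer.fit_transform(docs)
    clf = LogisticRegression().fit(train, targets)
    kernel = lambda d: np.sqrt(np.exp(-(d ** 2) / 25 ** 2))
    explainer = GeneralizedLocalExplainer(
        kernel, data_labels_distances_mapping_text, num_samples=500,
        return_mapped=True, verbose=False)
    print explainer.explain_instance(train[0], 1, clf.predict_proba, 2)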
| bsd-2-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/matplotlib/collections.py | 1 | 77012 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g., a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g., you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g., a large set of solid
line segments).
"""
import math
from numbers import Number
import numpy as np
import matplotlib as mpl
from . import (_path, artist, cbook, cm, colors as mcolors, docstring,
lines as mlines, path as mpath, transforms)
import warnings
@cbook._define_aliases({
"antialiased": ["antialiaseds", "aa"],
"edgecolor": ["edgecolors", "ec"],
"facecolor": ["facecolors", "fc"],
"linestyle": ["linestyles", "dashes", "ls"],
"linewidth": ["linewidths", "lw"],
})
class Collection(artist.Artist, cm.ScalarMappable):
r"""
Base class for Collections. Must be subclassed to be usable.
A Collection represents a sequence of `.Patch`\es that can be drawn
more efficiently together than individually. For example, when a single
path is being drawn repeatedly at different offsets, the renderer can
typically execute a ``draw_marker()`` call much more efficiently than a
series of repeated calls to ``draw_path()`` with the offsets put in
one-by-one.
Most properties of a collection can be configured per-element. Therefore,
Collections have "plural" versions of many of the properties of a `.Patch`
(e.g. `.Collection.get_paths` instead of `.Patch.get_path`). Exceptions are
the *zorder*, *hatch*, *pickradius*, *capstyle* and *joinstyle* properties,
which can only be set globally for the whole collection.
Besides these exceptions, all properties can be specified as single values
(applying to all elements) or sequences of values. The property of the
``i``\th element of the collection is::
prop[i % len(prop)]
Each Collection can optionally be used as its own `.ScalarMappable` by
passing the *norm* and *cmap* parameters to its constructor. If the
Collection's `.ScalarMappable` matrix ``_A`` has been set (via a call
to `.Collection.set_array`), then at draw time this internal scalar
mappable will be used to set the ``facecolors`` and ``edgecolors``,
ignoring those that were manually passed in.
"""
_offsets = np.zeros((0, 2))
_transOffset = transforms.IdentityTransform()
#: Either a list of 3x3 arrays or an Nx3x3 array (representing N
#: transforms), suitable for the `all_transforms` argument to
#: `~matplotlib.backend_bases.RendererBase.draw_path_collection`;
#: each 3x3 array is used to initialize an
#: `~matplotlib.transforms.Affine2D` object.
#: Each kind of collection defines this based on its arguments.
_transforms = np.empty((0, 3, 3))
# Whether to draw an edge by default. Set on a
# subclass-by-subclass basis.
_edge_default = False
@cbook._delete_parameter("3.3", "offset_position")
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
capstyle=None,
joinstyle=None,
antialiaseds=None,
offsets=None,
transOffset=None,
norm=None, # optional for ScalarMappable
cmap=None, # ditto
pickradius=5.0,
hatch=None,
urls=None,
offset_position='screen',
zorder=1,
**kwargs
):
"""
Parameters
----------
edgecolors : color or list of colors, default: :rc:`patch.edgecolor`
Edge color for each patch making up the collection. The special
value 'face' can be passed to make the edgecolor match the
facecolor.
facecolors : color or list of colors, default: :rc:`patch.facecolor`
Face color for each patch making up the collection.
linewidths : float or list of floats, default: :rc:`patch.linewidth`
Line width for each patch making up the collection.
linestyles : str or tuple or list thereof, default: 'solid'
Valid strings are ['solid', 'dashed', 'dashdot', 'dotted', '-',
'--', '-.', ':']. Dash tuples should be of the form::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink lengths
in points. For examples, see
:doc:`/gallery/lines_bars_and_markers/linestyles`.
capstyle : str, default: :rc:`patch.capstyle`
Style to use for capping lines for all paths in the collection.
See :doc:`/gallery/lines_bars_and_markers/joinstyle` for
a demonstration of each of the allowed values.
joinstyle : str, default: :rc:`patch.joinstyle`
Style to use for joining lines for all paths in the collection.
See :doc:`/gallery/lines_bars_and_markers/joinstyle` for
a demonstration of each of the allowed values.
antialiaseds : bool or list of bool, default: :rc:`patch.antialiased`
            Whether each patch in the collection should be drawn with
antialiasing.
offsets : (float, float) or list thereof, default: (0, 0)
A vector by which to translate each patch after rendering (default
is no translation). The translation is performed in screen (pixel)
coordinates (i.e. after the Artist's transform is applied).
transOffset : `~.transforms.Transform`, default: `.IdentityTransform`
A single transform which will be applied to each *offsets* vector
before it is used.
offset_position : {'screen' (default), 'data' (deprecated)}
If set to 'data' (deprecated), *offsets* will be treated as if it
is in data coordinates instead of in screen coordinates.
norm : `~.colors.Normalize`, optional
Forwarded to `.ScalarMappable`. The default of
``None`` means that the first draw call will set ``vmin`` and
``vmax`` using the minimum and maximum values of the data.
cmap : `~.colors.Colormap`, optional
Forwarded to `.ScalarMappable`. The default of
``None`` will result in :rc:`image.cmap` being used.
hatch : str, optional
Hatching pattern to use in filled paths, if any. Valid strings are
['/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*']. See
:doc:`/gallery/shapes_and_collections/hatch_demo` for the meaning
of each hatch type.
pickradius : float, default: 5.0
If ``pickradius <= 0``, then `.Collection.contains` will return
``True`` whenever the test point is inside of one of the polygons
formed by the control points of a Path in the Collection. On the
other hand, if it is greater than 0, then we instead check if the
test point is contained in a stroke of width ``2*pickradius``
following any of the Paths in the Collection.
urls : list of str, default: None
A URL for each patch to link to once drawn. Currently only works
for the SVG backend. See :doc:`/gallery/misc/hyperlinks_sgskip` for
examples.
zorder : float, default: 1
The drawing order, shared by all Patches in the Collection. See
:doc:`/gallery/misc/zorder_demo` for all defaults and examples.
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
# list of un-scaled dash patterns
        # this is needed for scaling the dash pattern by linewidth
self._us_linestyles = [(0, None)]
# list of dash patterns
self._linestyles = [(0, None)]
# list of unbroadcast/scaled linewidths
self._us_lw = [0]
self._linewidths = [0]
self._is_filled = True # May be modified by set_facecolor().
self._hatch_color = mcolors.to_rgba(mpl.rcParams['hatch.color'])
self.set_facecolor(facecolors)
self.set_edgecolor(edgecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_pickradius(pickradius)
self.set_urls(urls)
self.set_hatch(hatch)
self._offset_position = "screen"
if offset_position != "screen":
self.set_offset_position(offset_position) # emit deprecation.
self.set_zorder(zorder)
if capstyle:
self.set_capstyle(capstyle)
else:
self._capstyle = None
if joinstyle:
self.set_joinstyle(joinstyle)
else:
self._joinstyle = None
self._offsets = np.zeros((1, 2))
# save if offsets passed in were none...
self._offsetsNone = offsets is None
self._uniform_offsets = None
if offsets is not None:
offsets = np.asanyarray(offsets, float)
# Broadcast (2,) -> (1, 2) but nothing else.
if offsets.shape == (2,):
offsets = offsets[None, :]
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._path_effects = None
self.update(kwargs)
self._paths = None
def get_paths(self):
return self._paths
def set_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_offset_transform(self):
t = self._transOffset
if (not isinstance(t, transforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
return t
def get_datalim(self, transData):
# Calculate the data limits and return them as a `.Bbox`.
#
# This operation depends on the transforms for the data in the
# collection and whether the collection has offsets:
#
# 1. offsets = None, transform child of transData: use the paths for
# the automatic limits (i.e. for LineCollection in streamline).
# 2. offsets != None: offset_transform is child of transData:
#
# a. transform is child of transData: use the path + offset for
# limits (i.e for bar).
# b. transform is not a child of transData: just use the offsets
# for the limits (i.e. for scatter)
#
# 3. otherwise return a null Bbox.
transform = self.get_transform()
transOffset = self.get_offset_transform()
if (not self._offsetsNone and
not transOffset.contains_branch(transData)):
# if there are offsets but in some coords other than data,
# then don't use them for autoscaling.
return transforms.Bbox.null()
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
# Don't convert transform to transform.get_affine() here because
# we may have transform.contains_branch(transData) but not
# transforms.get_affine().contains_branch(transData). But later,
# be careful to only apply the affine part that remains.
if isinstance(offsets, np.ma.MaskedArray):
offsets = offsets.filled(np.nan)
# get_path_collection_extents handles nan but not masked arrays
if len(paths) and len(offsets):
if any(transform.contains_branch_seperately(transData)):
# collections that are just in data units (like quiver)
# can properly have the axes limits set by their shape +
# offset. LineCollections that have no offsets can
# also use this algorithm (like streamplot).
result = mpath.get_path_collection_extents(
transform.get_affine(), paths, self.get_transforms(),
transOffset.transform_non_affine(offsets),
transOffset.get_affine().frozen())
return result.transformed(transData.inverted())
if not self._offsetsNone:
# this is for collections that have their paths (shapes)
# in physical, axes-relative, or figure-relative units
# (i.e. like scatter). We can't uniquely set limits based on
# those shapes, so we just set the limits based on their
# location.
offsets = (transOffset - transData).transform(offsets)
# note A-B means A B^{-1}
offsets = np.ma.masked_invalid(offsets)
if not offsets.mask.all():
points = np.row_stack((offsets.min(axis=0),
offsets.max(axis=0)))
return transforms.Bbox(points)
return transforms.Bbox.null()
def get_window_extent(self, renderer):
# TODO: check to ensure that this does not fail for
# cases other than scatter plot legend
return self.get_datalim(transforms.IdentityTransform())
def _prepare_points(self):
# Helper for drawing and hit testing.
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(np.column_stack([xs, ys]), path.codes))
if offsets.size:
xs = self.convert_xunits(offsets[:, 0])
ys = self.convert_yunits(offsets[:, 1])
offsets = np.column_stack([xs, ys])
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path)
for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
# This might have changed an ndarray into a masked array.
transOffset = transOffset.get_affine()
if isinstance(offsets, np.ma.MaskedArray):
offsets = offsets.filled(np.nan)
# Changing from a masked array to nan-filled ndarray
# is probably most efficient at this point.
return transform, transOffset, offsets, paths
@artist.allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, self.get_gid())
self.update_scalarmappable()
transform, transOffset, offsets, paths = self._prepare_points()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_snap(self.get_snap())
if self._hatch:
gc.set_hatch(self._hatch)
gc.set_hatch_color(self._hatch_color)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
# If the collection is made up of a single shape/color/stroke,
# it can be rendered once and blitted multiple times, using
# `draw_markers` rather than `draw_path_collection`. This is
# *much* faster for Agg, and results in smaller file sizes in
# PDF/SVG/PS.
trans = self.get_transforms()
facecolors = self.get_facecolor()
edgecolors = self.get_edgecolor()
do_single_path_optimization = False
if (len(paths) == 1 and len(trans) <= 1 and
len(facecolors) == 1 and len(edgecolors) == 1 and
len(self._linewidths) == 1 and
all(ls[1] is None for ls in self._linestyles) and
len(self._antialiaseds) == 1 and len(self._urls) == 1 and
self.get_hatch() is None):
if len(trans):
combined_transform = transforms.Affine2D(trans[0]) + transform
else:
combined_transform = transform
extents = paths[0].get_extents(combined_transform)
if (extents.width < self.figure.bbox.width
and extents.height < self.figure.bbox.height):
do_single_path_optimization = True
if self._joinstyle:
gc.set_joinstyle(self._joinstyle)
if self._capstyle:
gc.set_capstyle(self._capstyle)
if do_single_path_optimization:
gc.set_foreground(tuple(edgecolors[0]))
gc.set_linewidth(self._linewidths[0])
gc.set_dashes(*self._linestyles[0])
gc.set_antialiased(self._antialiaseds[0])
gc.set_url(self._urls[0])
renderer.draw_markers(
gc, paths[0], combined_transform.frozen(),
mpath.Path(offsets), transOffset, tuple(facecolors[0]))
else:
renderer.draw_path_collection(
gc, transform.frozen(), paths,
self.get_transforms(), offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(),
self._linewidths, self._linestyles,
self._antialiaseds, self._urls,
self._offset_position)
gc.restore()
renderer.close_group(self.__class__.__name__)
self.stale = False
def set_pickradius(self, pr):
"""
Set the pick radius used for containment tests.
Parameters
----------
        pr : float
Pick radius, in points.
"""
self._pickradius = pr
def get_pickradius(self):
return self._pickradius
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns ``bool, dict(ind=itemlist)``, where every item in itemlist
contains the event.
"""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
if not self.get_visible():
return False, {}
pickradius = (
float(self._picker)
if isinstance(self._picker, Number) and
self._picker is not True # the bool, not just nonzero or 1
else self._pickradius)
if self.axes:
self.axes._unstale_viewLim()
transform, transOffset, offsets, paths = self._prepare_points()
# Tests if the point is contained on one of the polygons formed
# by the control points of each of the paths. A point is considered
# "on" a path if it would lie within a stroke of width 2*pickradius
# following the path. If pickradius <= 0, then we instead simply check
# if the point is *inside* of the path instead.
ind = _path.point_in_path_collection(
mouseevent.x, mouseevent.y, pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, pickradius <= 0,
self._offset_position)
return len(ind) > 0, dict(ind=ind)
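    # Illustrative usage sketch (not part of the library source): containment
    # tests are usually exercised through the pick-event machinery rather than
    # by calling ``contains`` directly. The data and the ``on_pick`` callback
    # name below are made up.
    #
    #     import matplotlib.pyplot as plt
    #     fig, ax = plt.subplots()
    #     coll = ax.scatter([1, 2, 3], [3, 1, 2], picker=True, pickradius=5)
    #     def on_pick(event):
    #         print("picked collection indices:", event.ind)
    #     fig.canvas.mpl_connect('pick_event', on_pick)
    #     plt.show()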
def set_urls(self, urls):
"""
Parameters
----------
urls : list of str or None
Notes
-----
URLs are currently only implemented by the SVG backend. They are
ignored by all other backends.
"""
self._urls = urls if urls is not None else [None]
self.stale = True
def get_urls(self):
"""
Return a list of URLs, one for each element of the collection.
The list contains *None* for elements without a URL. See
:doc:`/gallery/misc/hyperlinks_sgskip` for an example.
"""
return self._urls
def set_hatch(self, hatch):
r"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Unlike other properties such as linewidth and colors, hatching
can only be specified for the collection as a whole, not separately
for each member.
Parameters
----------
hatch : {'/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}
"""
self._hatch = hatch
self.stale = True
def get_hatch(self):
"""Return the current hatching pattern."""
return self._hatch
def set_offsets(self, offsets):
"""
Set the offsets for the collection.
Parameters
----------
offsets : array-like (N, 2) or (2,)
"""
offsets = np.asanyarray(offsets, float)
if offsets.shape == (2,): # Broadcast (2,) -> (1, 2) but nothing else.
offsets = offsets[None, :]
# This decision is based on how they are initialized above in __init__.
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
self.stale = True
def get_offsets(self):
"""Return the offsets for the collection."""
# This decision is based on how they are initialized above in __init__.
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
@cbook.deprecated("3.3")
def set_offset_position(self, offset_position):
"""
Set how offsets are applied. If *offset_position* is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates.
If offset_position is 'data', the offset is applied before the
master transform, i.e., the offsets are in data coordinates.
Parameters
----------
offset_position : {'screen', 'data'}
"""
cbook._check_in_list(['screen', 'data'],
offset_position=offset_position)
self._offset_position = offset_position
self.stale = True
@cbook.deprecated("3.3")
def get_offset_position(self):
"""
Return how offsets are applied for the collection. If
*offset_position* is 'screen', the offset is applied after the
master transform has been applied, that is, the offsets are in
screen coordinates. If offset_position is 'data', the offset
is applied before the master transform, i.e., the offsets are
in data coordinates.
"""
return self._offset_position
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
        through the sequence.
Parameters
----------
lw : float or list of floats
"""
if lw is None:
lw = mpl.rcParams['patch.linewidth']
if lw is None:
lw = mpl.rcParams['lines.linewidth']
# get the un-scaled/broadcast lw
self._us_lw = np.atleast_1d(np.asarray(lw))
# scale all of the dash patterns.
self._linewidths, self._linestyles = self._bcast_lwls(
self._us_lw, self._us_linestyles)
self.stale = True
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dashdot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq),
where ``onoffseq`` is an even length tuple of on and off ink in points.
Parameters
----------
ls : str or tuple or list thereof
Valid values for individual linestyles include {'-', '--', '-.',
':', '', (offset, on-off-seq)}. See `.Line2D.set_linestyle` for a
complete description.
"""
try:
if isinstance(ls, str):
ls = cbook.ls_mapper.get(ls, ls)
dashes = [mlines._get_dash_pattern(ls)]
else:
try:
dashes = [mlines._get_dash_pattern(ls)]
except ValueError:
dashes = [mlines._get_dash_pattern(x) for x in ls]
except ValueError as err:
raise ValueError('Do not know how to convert {!r} to '
'dashes'.format(ls)) from err
# get the list of raw 'unscaled' dash patterns
self._us_linestyles = dashes
# broadcast and scale the lw and dash patterns
self._linewidths, self._linestyles = self._bcast_lwls(
self._us_lw, self._us_linestyles)
def set_capstyle(self, cs):
"""
Set the capstyle for the collection (for all its elements).
Parameters
----------
cs : {'butt', 'round', 'projecting'}
The capstyle.
"""
mpl.rcsetup.validate_capstyle(cs)
self._capstyle = cs
def get_capstyle(self):
return self._capstyle
def set_joinstyle(self, js):
"""
Set the joinstyle for the collection (for all its elements).
Parameters
----------
js : {'miter', 'round', 'bevel'}
The joinstyle.
"""
mpl.rcsetup.validate_joinstyle(js)
self._joinstyle = js
def get_joinstyle(self):
return self._joinstyle
@staticmethod
def _bcast_lwls(linewidths, dashes):
"""
Internal helper function to broadcast + scale ls/lw
In the collection drawing code, the linewidth and linestyle are cycled
through as circular buffers (via ``v[i % len(v)]``). Thus, if we are
going to scale the dash pattern at set time (not draw time) we need to
do the broadcasting now and expand both lists to be the same length.
Parameters
----------
linewidths : list
line widths of collection
dashes : list
dash specification (offset, (dash pattern tuple))
Returns
-------
linewidths, dashes : list
Will be the same length, dashes are scaled by paired linewidth
"""
if mpl.rcParams['_internal.classic_mode']:
return linewidths, dashes
# make sure they are the same length so we can zip them
if len(dashes) != len(linewidths):
l_dashes = len(dashes)
l_lw = len(linewidths)
gcd = math.gcd(l_dashes, l_lw)
dashes = list(dashes) * (l_lw // gcd)
linewidths = list(linewidths) * (l_dashes // gcd)
        # scale the dash patterns
dashes = [mlines._scale_dashes(o, d, lw)
for (o, d), lw in zip(dashes, linewidths)]
return linewidths, dashes
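    # Illustrative sketch (not part of the library source): with two linewidths
    # and a single dashed pattern, both lists are broadcast to length two and
    # each dash pattern is scaled by its paired linewidth (unless classic mode
    # is active). ``mlines._get_dash_pattern`` is the private helper used above.
    #
    #     lws, dashes = Collection._bcast_lwls(
    #         [1.0, 2.0], [mlines._get_dash_pattern('--')])
    #     # len(lws) == len(dashes) == 2; the second pattern's on/off lengths
    #     # are twice the first's because they are scaled by lw == 2.0.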
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
Parameters
----------
aa : bool or list of bools
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = np.atleast_1d(np.asarray(aa, bool))
self.stale = True
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
Parameters
----------
c : color or list of rgba tuples
See Also
--------
Collection.set_facecolor, Collection.set_edgecolor
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def _set_facecolor(self, c):
if c is None:
c = mpl.rcParams['patch.facecolor']
self._is_filled = True
try:
if c.lower() == 'none':
self._is_filled = False
except AttributeError:
pass
self._facecolors = mcolors.to_rgba_array(c, self._alpha)
self.stale = True
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a color (all patches
have same color), or a sequence of colors; if it is a sequence the
patches will cycle through the sequence.
If *c* is 'none', the patch will not be filled.
Parameters
----------
c : color or list of colors
"""
self._original_facecolor = c
self._set_facecolor(c)
def get_facecolor(self):
return self._facecolors
def get_edgecolor(self):
if cbook._str_equal(self._edgecolors, 'face'):
return self.get_facecolor()
else:
return self._edgecolors
def _set_edgecolor(self, c):
set_hatch_color = True
if c is None:
if (mpl.rcParams['patch.force_edgecolor'] or
not self._is_filled or self._edge_default):
c = mpl.rcParams['patch.edgecolor']
else:
c = 'none'
set_hatch_color = False
self._is_stroked = True
try:
if c.lower() == 'none':
self._is_stroked = False
except AttributeError:
pass
try:
if c.lower() == 'face': # Special case: lookup in "get" method.
self._edgecolors = 'face'
return
except AttributeError:
pass
self._edgecolors = mcolors.to_rgba_array(c, self._alpha)
if set_hatch_color and len(self._edgecolors):
self._hatch_color = tuple(self._edgecolors[0])
self.stale = True
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection.
Parameters
----------
c : color or list of colors or 'face'
The collection edgecolor(s). If a sequence, the patches cycle
through it. If 'face', match the facecolor.
"""
self._original_edgecolor = c
self._set_edgecolor(c)
def set_alpha(self, alpha):
# docstring inherited
super().set_alpha(alpha)
self._update_dict['array'] = True
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
def get_linewidth(self):
return self._linewidths
def get_linestyle(self):
return self._linestyles
def update_scalarmappable(self):
"""Update colors from the scalar mappable array, if it is not None."""
if self._A is None:
return
# QuadMesh can map 2d arrays
if self._A.ndim > 1 and not isinstance(self, QuadMesh):
raise ValueError('Collections can only map rank 1 arrays')
if not self._check_update("array"):
return
if self._is_filled:
self._facecolors = self.to_rgba(self._A, self._alpha)
elif self._is_stroked:
self._edgecolors = self.to_rgba(self._A, self._alpha)
self.stale = True
def get_fill(self):
"""Return whether fill is set."""
return self._is_filled
def update_from(self, other):
"""Copy properties from other to self."""
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._original_edgecolor = other._original_edgecolor
self._edgecolors = other._edgecolors
self._original_facecolor = other._original_facecolor
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._us_linestyles = other._us_linestyles
self._pickradius = other._pickradius
self._hatch = other._hatch
# update_from for scalarmappable
self._A = other._A
self.norm = other.norm
self.cmap = other.cmap
# do we need to copy self._update_dict? -JJL
self.stale = True
class _CollectionWithSizes(Collection):
"""
Base class for collections that have an array of sizes.
"""
_factor = 1.0
def get_sizes(self):
"""
Return the sizes ('areas') of the elements in the collection.
Returns
-------
array
The 'area' of each element.
"""
return self._sizes
def set_sizes(self, sizes, dpi=72.0):
"""
Set the sizes of each member of the collection.
Parameters
----------
sizes : ndarray or None
The size to set for each element of the collection. The
value is the 'area' of the element.
dpi : float, default: 72
The dpi of the canvas.
"""
if sizes is None:
self._sizes = np.array([])
self._transforms = np.empty((0, 3, 3))
else:
self._sizes = np.asarray(sizes)
self._transforms = np.zeros((len(self._sizes), 3, 3))
scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor
self._transforms[:, 0, 0] = scale
self._transforms[:, 1, 1] = scale
self._transforms[:, 2, 2] = 1.0
self.stale = True
@artist.allow_rasterization
def draw(self, renderer):
self.set_sizes(self._sizes, self.figure.dpi)
Collection.draw(self, renderer)
class PathCollection(_CollectionWithSizes):
r"""
A collection of `~.path.Path`\s, as created by e.g. `~.Axes.scatter`.
"""
def __init__(self, paths, sizes=None, **kwargs):
"""
Parameters
----------
paths : list of `.path.Path`
The paths that will make up the `.Collection`.
sizes : array-like
The factor by which to scale each drawn `~.path.Path`. One unit
squared in the Path's data space is scaled to be ``sizes**2``
points when rendered.
**kwargs
Forwarded to `.Collection`.
"""
super().__init__(**kwargs)
self.set_paths(paths)
self.set_sizes(sizes)
self.stale = True
def set_paths(self, paths):
self._paths = paths
self.stale = True
def get_paths(self):
return self._paths
def legend_elements(self, prop="colors", num="auto",
fmt=None, func=lambda x: x, **kwargs):
"""
Create legend handles and labels for a PathCollection.
Each legend handle is a `.Line2D` representing the Path that was drawn,
        and each label is a string describing what each Path represents.
This is useful for obtaining a legend for a `~.Axes.scatter` plot;
e.g.::
scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3])
plt.legend(*scatter.legend_elements())
creates three legend elements, one for each color with the numerical
values passed to *c* as the labels.
Also see the :ref:`automatedlegendcreation` example.
Parameters
----------
prop : {"colors", "sizes"}, default: "colors"
If "colors", the legend handles will show the different colors of
the collection. If "sizes", the legend will show the different
sizes. To set both, use *kwargs* to directly edit the `.Line2D`
properties.
num : int, None, "auto" (default), array-like, or `~.ticker.Locator`,
Target number of elements to create.
If None, use all unique elements of the mappable array. If an
integer, target to use *num* elements in the normed range.
If *"auto"*, try to determine which option better suits the nature
of the data.
The number of created elements may slightly deviate from *num* due
to a `~.ticker.Locator` being used to find useful locations.
If a list or array, use exactly those elements for the legend.
Finally, a `~.ticker.Locator` can be provided.
fmt : str, `~matplotlib.ticker.Formatter`, or None (default)
            The format or formatter to use for the labels. If a string, it must be
a valid input for a `~.StrMethodFormatter`. If None (the default),
use a `~.ScalarFormatter`.
func : function, default *lambda x: x*
Function to calculate the labels. Often the size (or color)
argument to `~.Axes.scatter` will have been pre-processed by the
user using a function ``s = f(x)`` to make the markers visible;
e.g. ``size = np.log10(x)``. Providing the inverse of this
function here allows that pre-processing to be inverted, so that
the legend labels have the correct values; e.g. ``func = lambda
x: 10**x``.
**kwargs
Allowed keyword arguments are *color* and *size*. E.g. it may be
useful to set the color of the markers if *prop="sizes"* is used;
similarly to set the size of the markers if *prop="colors"* is
used. Any further parameters are passed onto the `.Line2D`
instance. This may be useful to e.g. specify a different
*markeredgecolor* or *alpha* for the legend handles.
Returns
-------
handles : list of `.Line2D`
Visual representation of each element of the legend.
labels : list of str
The string labels for elements of the legend.
"""
handles = []
labels = []
hasarray = self.get_array() is not None
if fmt is None:
fmt = mpl.ticker.ScalarFormatter(useOffset=False, useMathText=True)
elif isinstance(fmt, str):
fmt = mpl.ticker.StrMethodFormatter(fmt)
fmt.create_dummy_axis()
if prop == "colors":
if not hasarray:
warnings.warn("Collection without array used. Make sure to "
"specify the values to be colormapped via the "
"`c` argument.")
return handles, labels
u = np.unique(self.get_array())
size = kwargs.pop("size", mpl.rcParams["lines.markersize"])
elif prop == "sizes":
u = np.unique(self.get_sizes())
color = kwargs.pop("color", "k")
else:
raise ValueError("Valid values for `prop` are 'colors' or "
f"'sizes'. You supplied '{prop}' instead.")
fmt.set_bounds(func(u).min(), func(u).max())
if num == "auto":
num = 9
if len(u) <= num:
num = None
if num is None:
values = u
label_values = func(values)
else:
if prop == "colors":
arr = self.get_array()
elif prop == "sizes":
arr = self.get_sizes()
if isinstance(num, mpl.ticker.Locator):
loc = num
elif np.iterable(num):
loc = mpl.ticker.FixedLocator(num)
else:
num = int(num)
loc = mpl.ticker.MaxNLocator(nbins=num, min_n_ticks=num-1,
steps=[1, 2, 2.5, 3, 5, 6, 8, 10])
label_values = loc.tick_values(func(arr).min(), func(arr).max())
cond = ((label_values >= func(arr).min()) &
(label_values <= func(arr).max()))
label_values = label_values[cond]
xarr = np.linspace(arr.min(), arr.max(), 256)
values = np.interp(label_values, func(xarr), xarr)
kw = dict(markeredgewidth=self.get_linewidths()[0],
alpha=self.get_alpha())
kw.update(kwargs)
for val, lab in zip(values, label_values):
if prop == "colors":
color = self.cmap(self.norm(val))
elif prop == "sizes":
size = np.sqrt(val)
if np.isclose(size, 0.0):
continue
h = mlines.Line2D([0], [0], ls="", color=color, ms=size,
marker=self.get_paths()[0], **kw)
handles.append(h)
if hasattr(fmt, "set_locs"):
fmt.set_locs(label_values)
l = fmt(lab)
labels.append(l)
return handles, labels
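# Illustrative usage sketch (not part of the library source): building a size
# legend for a scatter plot via ``legend_elements(prop="sizes")``; the data and
# styling values are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x, y = np.random.rand(2, 30)
#     s = np.random.randint(10, 200, 30)
#     fig, ax = plt.subplots()
#     sc = ax.scatter(x, y, s=s)
#     handles, labels = sc.legend_elements(prop="sizes", num=4, color="gray")
#     ax.legend(handles, labels, title="size")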
class PolyCollection(_CollectionWithSizes):
def __init__(self, verts, sizes=None, closed=True, **kwargs):
"""
Parameters
----------
verts : list of array-like
The sequence of polygons [*verts0*, *verts1*, ...] where each
element *verts_i* defines the vertices of polygon *i* as a 2D
array-like of shape (M, 2).
sizes : array-like, default: None
Squared scaling factors for the polygons. The coordinates of each
polygon *verts_i* are multiplied by the square-root of the
corresponding entry in *sizes* (i.e., *sizes* specify the scaling
of areas). The scaling is applied before the Artist master
transform.
closed : bool, default: True
Whether the polygon should be closed by adding a CLOSEPOLY
connection at the end.
**kwargs
Forwarded to `.Collection`.
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self.set_verts(verts, closed)
self.stale = True
def set_verts(self, verts, closed=True):
"""
Set the vertices of the polygons.
Parameters
----------
verts : list of array-like
The sequence of polygons [*verts0*, *verts1*, ...] where each
element *verts_i* defines the vertices of polygon *i* as a 2D
array-like of shape (M, 2).
closed : bool, default: True
Whether the polygon should be closed by adding a CLOSEPOLY
connection at the end.
"""
self.stale = True
if isinstance(verts, np.ma.MaskedArray):
verts = verts.astype(float).filled(np.nan)
# No need to do anything fancy if the path isn't closed.
if not closed:
self._paths = [mpath.Path(xy) for xy in verts]
return
# Fast path for arrays
if isinstance(verts, np.ndarray) and len(verts.shape) == 3:
verts_pad = np.concatenate((verts, verts[:, :1]), axis=1)
# Creating the codes once is much faster than having Path do it
# separately each time by passing closed=True.
codes = np.empty(verts_pad.shape[1], dtype=mpath.Path.code_type)
codes[:] = mpath.Path.LINETO
codes[0] = mpath.Path.MOVETO
codes[-1] = mpath.Path.CLOSEPOLY
self._paths = [mpath.Path(xy, codes) for xy in verts_pad]
return
self._paths = []
for xy in verts:
if len(xy):
if isinstance(xy, np.ma.MaskedArray):
xy = np.ma.concatenate([xy, xy[:1]])
else:
xy = np.concatenate([xy, xy[:1]])
self._paths.append(mpath.Path(xy, closed=True))
else:
self._paths.append(mpath.Path(xy))
set_paths = set_verts
def set_verts_and_codes(self, verts, codes):
"""Initialize vertices with path codes."""
if len(verts) != len(codes):
raise ValueError("'codes' must be a 1D list or array "
"with the same length of 'verts'")
self._paths = []
for xy, cds in zip(verts, codes):
if len(xy):
self._paths.append(mpath.Path(xy, cds))
else:
self._paths.append(mpath.Path(xy))
self.stale = True
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
def __init__(self, xranges, yrange, **kwargs):
"""
Parameters
----------
xranges : list of (float, float)
The sequence of (left-edge-position, width) pairs for each bar.
yrange : (float, float)
The (lower-edge, height) common to all bars.
**kwargs
Forwarded to `.Collection`.
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [[(xmin, ymin),
(xmin, ymax),
(xmin + xwidth, ymax),
(xmin + xwidth, ymin),
(xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
@classmethod
def span_where(cls, x, ymin, ymax, where, **kwargs):
"""
        Return a `.BrokenBarHCollection` that plots horizontal bars over
        the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
*kwargs* are passed on to the collection.
"""
xranges = []
for ind0, ind1 in cbook.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1] - xslice[0]))
return cls(xranges, [ymin, ymax - ymin], **kwargs)
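# Illustrative usage sketch (not part of the library source): shading the
# regions where a curve is positive with ``span_where``; the data are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x = np.linspace(0, 4 * np.pi, 200)
#     y = np.sin(x)
#     fig, ax = plt.subplots()
#     ax.plot(x, y)
#     coll = BrokenBarHCollection.span_where(
#         x, ymin=-1, ymax=1, where=y > 0, facecolor='green', alpha=0.3)
#     ax.add_collection(coll)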
class RegularPolyCollection(_CollectionWithSizes):
"""A collection of n-sided regular polygons."""
_path_generator = mpath.Path.unit_regular_polygon
_factor = np.pi ** (-1/2)
def __init__(self,
numsides,
rotation=0,
sizes=(1,),
**kwargs):
"""
Parameters
----------
numsides : int
The number of sides of the polygon.
rotation : float
The rotation of the polygon in radians.
sizes : tuple of float
The area of the circle circumscribing the polygon in points^2.
**kwargs
Forwarded to `.Collection`.
Examples
--------
See :doc:`/gallery/event_handling/lasso_demo` for a complete example::
offsets = np.random.rand(20, 2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors=facecolors,
edgecolors=("black",),
linewidths=(1,),
offsets=offsets,
transOffset=ax.transData,
)
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
@artist.allow_rasterization
def draw(self, renderer):
self.set_sizes(self._sizes, self.figure.dpi)
self._transforms = [
transforms.Affine2D(x).rotate(-self._rotation).get_matrix()
for x in self._transforms
]
Collection.draw(self, renderer)
class StarPolygonCollection(RegularPolyCollection):
"""Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
r"""
Represents a sequence of `.Line2D`\s that should be drawn together.
This class extends `.Collection` to represent a sequence of
`~.Line2D`\s instead of just a sequence of `~.Patch`\s.
Just as in `.Collection`, each property of a *LineCollection* may be either
a single value or a list of values. This list is then used cyclically for
each element of the LineCollection, so the property of the ``i``\th element
of the collection is::
prop[i % len(prop)]
The properties of each member of a *LineCollection* default to their values
in :rc:`lines.*` instead of :rc:`patch.*`, and the property *colors* is
added in place of *edgecolors*.
"""
_edge_default = True
def __init__(self, segments, # Can be None.
linewidths=None,
colors=None,
antialiaseds=None,
linestyles='solid',
offsets=None,
transOffset=None,
norm=None,
cmap=None,
pickradius=5,
zorder=2,
facecolors='none',
**kwargs
):
"""
Parameters
----------
segments: list of array-like
A sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can have a different number of segments.
linewidths : float or list of float, default: :rc:`lines.linewidth`
The width of each line in points.
colors : color or list of color, default: :rc:`lines.color`
            The color(s) of the line segments. Any matplotlib color
            specification is accepted; the values are converted to RGBA
            tuples internally.
antialiaseds : bool or list of bool, default: :rc:`lines.antialiased`
Whether to use antialiasing for each line.
zorder : int, default: 2
zorder of the lines once drawn.
facecolors : color or list of color, default: 'none'
The facecolors of the LineCollection.
Setting to a value other than 'none' will lead to each line being
"filled in" as if there was an implicit line segment joining the
last and first points of that line back around to each other. In
order to manually specify what should count as the "interior" of
each line, please use `.PathCollection` instead, where the
"interior" can be specified by appropriate usage of
`~.path.Path.CLOSEPOLY`.
**kwargs
            Forwarded to `.Collection`.
"""
if colors is None:
colors = mpl.rcParams['lines.color']
if linewidths is None:
linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None:
antialiaseds = (mpl.rcParams['lines.antialiased'],)
colors = mcolors.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
facecolors=facecolors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
zorder=zorder,
**kwargs)
self.set_segments(segments)
def set_segments(self, segments):
if segments is None:
return
_segments = []
for seg in segments:
if not isinstance(seg, np.ma.MaskedArray):
seg = np.asarray(seg, float)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(_seg) for _seg in _segments]
self.stale = True
set_verts = set_segments # for compatibility with PolyCollection
set_paths = set_segments
def get_segments(self):
"""
Returns
-------
list
List of segments in the LineCollection. Each list item contains an
array of vertices.
"""
segments = []
for path in self._paths:
vertices = [vertex for vertex, _ in path.iter_segments()]
vertices = np.asarray(vertices)
segments.append(vertices)
return segments
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i % Noffs
segs[i] = segs[i] + offsets[io:io + 1]
return segs
def set_color(self, c):
"""
Set the color(s) of the LineCollection.
Parameters
----------
c : color or list of colors
Single color (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
"""
self.set_edgecolor(c)
self.stale = True
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
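# Illustrative usage sketch (not part of the library source): colormapping a set
# of line segments by a scalar value through the ScalarMappable interface; the
# data are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x = np.linspace(0, 1, 100)
#     segments = [np.column_stack([x, x**k]) for k in range(1, 6)]
#     lc = LineCollection(segments, cmap='viridis', linewidths=2)
#     lc.set_array(np.arange(1, 6))   # one scalar per line -> edge colors
#     fig, ax = plt.subplots()
#     ax.add_collection(lc)
#     ax.set_xlim(0, 1)
#     ax.set_ylim(0, 1)
#     fig.colorbar(lc, ax=ax)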
class EventCollection(LineCollection):
"""
    A collection of locations along a single axis at which an "event" occurred.
The events are given by a 1-dimensional array. They do not have an
amplitude and are displayed as parallel lines.
"""
_edge_default = True
def __init__(self,
positions, # Cannot be None.
orientation='horizontal',
lineoffset=0,
linelength=1,
linewidth=None,
color=None,
linestyle='solid',
antialiased=None,
**kwargs
):
"""
Parameters
----------
positions : 1D array-like
Each value is an event.
orientation : {'horizontal', 'vertical'}, default: 'horizontal'
The sequence of events is plotted along this direction.
The marker lines of the single events are along the orthogonal
direction.
lineoffset : float, default: 0
The offset of the center of the markers from the origin, in the
direction orthogonal to *orientation*.
linelength : float, default: 1
The total height of the marker (i.e. the marker stretches from
``lineoffset - linelength/2`` to ``lineoffset + linelength/2``).
linewidth : float or list thereof, default: :rc:`lines.linewidth`
The line width of the event lines, in points.
color : color or list of colors, default: :rc:`lines.color`
The color of the event lines.
linestyle : str or tuple or list thereof, default: 'solid'
Valid strings are ['solid', 'dashed', 'dashdot', 'dotted',
'-', '--', '-.', ':']. Dash tuples should be of the form::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
antialiased : bool or list thereof, default: :rc:`lines.antialiased`
Whether to use antialiasing for drawing the lines.
**kwargs
Forwarded to `.LineCollection`.
Examples
--------
.. plot:: gallery/lines_bars_and_markers/eventcollection_demo.py
"""
LineCollection.__init__(self,
[],
linewidths=linewidth,
colors=color,
antialiaseds=antialiased,
linestyles=linestyle,
**kwargs)
self._is_horizontal = True # Initial value, may be switched below.
self._linelength = linelength
self._lineoffset = lineoffset
self.set_orientation(orientation)
self.set_positions(positions)
def get_positions(self):
"""
Return an array containing the floating-point values of the positions.
"""
pos = 0 if self.is_horizontal() else 1
return [segment[0, pos] for segment in self.get_segments()]
def set_positions(self, positions):
"""Set the positions of the events."""
if positions is None:
positions = []
if np.ndim(positions) != 1:
raise ValueError('positions must be one-dimensional')
lineoffset = self.get_lineoffset()
linelength = self.get_linelength()
pos_idx = 0 if self.is_horizontal() else 1
segments = np.empty((len(positions), 2, 2))
segments[:, :, pos_idx] = np.sort(positions)[:, None]
segments[:, 0, 1 - pos_idx] = lineoffset + linelength / 2
segments[:, 1, 1 - pos_idx] = lineoffset - linelength / 2
self.set_segments(segments)
def add_positions(self, position):
"""Add one or more events at the specified positions."""
if position is None or (hasattr(position, 'len') and
len(position) == 0):
return
positions = self.get_positions()
positions = np.hstack([positions, np.asanyarray(position)])
self.set_positions(positions)
extend_positions = append_positions = add_positions
def is_horizontal(self):
"""True if the eventcollection is horizontal, False if vertical."""
return self._is_horizontal
def get_orientation(self):
"""
Return the orientation of the event line ('horizontal' or 'vertical').
"""
return 'horizontal' if self.is_horizontal() else 'vertical'
def switch_orientation(self):
"""
Switch the orientation of the event line, either from vertical to
        horizontal or vice versa.
"""
segments = self.get_segments()
for i, segment in enumerate(segments):
segments[i] = np.fliplr(segment)
self.set_segments(segments)
self._is_horizontal = not self.is_horizontal()
self.stale = True
def set_orientation(self, orientation=None):
"""
Set the orientation of the event line.
Parameters
----------
orientation : {'horizontal', 'vertical'}
"""
try:
is_horizontal = cbook._check_getitem(
{"horizontal": True, "vertical": False},
orientation=orientation)
except ValueError:
if (orientation is None or orientation.lower() == "none"
or orientation.lower() == "horizontal"):
is_horizontal = True
elif orientation.lower() == "vertical":
is_horizontal = False
else:
raise
normalized = "horizontal" if is_horizontal else "vertical"
cbook.warn_deprecated(
"3.3", message="Support for setting the orientation of "
f"EventCollection to {orientation!r} is deprecated since "
f"%(since)s and will be removed %(removal)s; please set it to "
f"{normalized!r} instead.")
if is_horizontal == self.is_horizontal():
return
self.switch_orientation()
def get_linelength(self):
"""Return the length of the lines used to mark each event."""
return self._linelength
def set_linelength(self, linelength):
"""Set the length of the lines used to mark each event."""
if linelength == self.get_linelength():
return
lineoffset = self.get_lineoffset()
segments = self.get_segments()
pos = 1 if self.is_horizontal() else 0
for segment in segments:
segment[0, pos] = lineoffset + linelength / 2.
segment[1, pos] = lineoffset - linelength / 2.
self.set_segments(segments)
self._linelength = linelength
def get_lineoffset(self):
"""Return the offset of the lines used to mark each event."""
return self._lineoffset
def set_lineoffset(self, lineoffset):
"""Set the offset of the lines used to mark each event."""
if lineoffset == self.get_lineoffset():
return
linelength = self.get_linelength()
segments = self.get_segments()
pos = 1 if self.is_horizontal() else 0
for segment in segments:
segment[0, pos] = lineoffset + linelength / 2.
segment[1, pos] = lineoffset - linelength / 2.
self.set_segments(segments)
self._lineoffset = lineoffset
def get_linewidth(self):
"""Get the width of the lines used to mark each event."""
return super(EventCollection, self).get_linewidth()[0]
def get_linewidths(self):
return super(EventCollection, self).get_linewidth()
def get_color(self):
"""Return the color of the lines used to mark each event."""
return self.get_colors()[0]
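# Illustrative usage sketch (not part of the library source): marking event
# times as short ticks along the x-axis (the same machinery `Axes.eventplot`
# uses); the data are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     events = np.random.uniform(0, 10, 40)
#     ec = EventCollection(events, orientation='horizontal',
#                          lineoffset=0.5, linelength=0.4, color='C0')
#     fig, ax = plt.subplots()
#     ax.add_collection(ec)
#     ax.set_xlim(0, 10)
#     ax.set_ylim(0, 1)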
class CircleCollection(_CollectionWithSizes):
"""A collection of circles, drawn using splines."""
_factor = np.pi ** (-1/2)
def __init__(self, sizes, **kwargs):
"""
Parameters
----------
sizes : float or array-like
The area of each circle in points^2.
**kwargs
Forwarded to `.Collection`.
"""
Collection.__init__(self, **kwargs)
self.set_sizes(sizes)
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
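# Illustrative usage sketch (not part of the library source): circles with areas
# given in points^2, positioned in data coordinates through *offsets* and
# *transOffset*; the data are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     offsets = np.random.rand(10, 2)
#     cc = CircleCollection(sizes=np.linspace(20, 200, 10), offsets=offsets,
#                           transOffset=ax.transData, facecolors='C1')
#     ax.add_collection(cc)
#     ax.autoscale_view()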
class EllipseCollection(Collection):
"""A collection of ellipses, drawn using splines."""
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
Parameters
----------
widths : array-like
The lengths of the first axes (e.g., major axis lengths).
heights : array-like
The lengths of second axes.
angles : array-like
The angles of the first axes, degrees CCW from the x-axis.
units : {'points', 'inches', 'dots', 'width', 'height', 'x', 'y', 'xy'}
The units in which majors and minors are given; 'width' and
'height' refer to the dimensions of the axes, while 'x' and 'y'
refer to the *offsets* data units. 'xy' differs from all others in
that the angle as plotted varies with the aspect ratio, and equals
the specified angle only when the aspect ratio is unity. Hence
it behaves the same as the `~.patches.Ellipse` with
``axes.transData`` as its transform.
**kwargs
Forwarded to `Collection`.
"""
Collection.__init__(self, **kwargs)
self._widths = 0.5 * np.asarray(widths).ravel()
self._heights = 0.5 * np.asarray(heights).ravel()
self._angles = np.deg2rad(angles).ravel()
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = np.empty((0, 3, 3))
self._paths = [mpath.Path.unit_circle()]
def _set_transforms(self):
"""Calculate transforms immediately before drawing."""
ax = self.axes
fig = self.figure
if self._units == 'xy':
sc = 1
elif self._units == 'x':
sc = ax.bbox.width / ax.viewLim.width
elif self._units == 'y':
sc = ax.bbox.height / ax.viewLim.height
elif self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
self._transforms = np.zeros((len(self._widths), 3, 3))
widths = self._widths * sc
heights = self._heights * sc
sin_angle = np.sin(self._angles)
cos_angle = np.cos(self._angles)
self._transforms[:, 0, 0] = widths * cos_angle
self._transforms[:, 0, 1] = heights * -sin_angle
self._transforms[:, 1, 0] = widths * sin_angle
self._transforms[:, 1, 1] = heights * cos_angle
self._transforms[:, 2, 2] = 1.0
_affine = transforms.Affine2D
if self._units == 'xy':
m = ax.transData.get_affine().get_matrix().copy()
m[:2, 2:] = 0
self.set_transform(_affine(m))
@artist.allow_rasterization
def draw(self, renderer):
self._set_transforms()
Collection.draw(self, renderer)
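# Illustrative usage sketch (not part of the library source): ellipses whose
# widths and heights are interpreted in data units (``units='xy'``), so they
# stretch with the axes limits; the data are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     xy = np.random.rand(15, 2) * 10
#     ww = np.full(15, 1.2)
#     hh = np.full(15, 0.6)
#     aa = np.random.rand(15) * 180
#     ec = EllipseCollection(ww, hh, aa, units='xy',
#                            offsets=xy, transOffset=ax.transData)
#     ax.add_collection(ec)
#     ax.set_xlim(0, 10)
#     ax.set_ylim(0, 10)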
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
            If True, use the colors, linewidths, linestyles, and antialiasing
            of the original patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds* are
None, they default to their `.rcParams` patch setting, in sequence
form.
The use of `~matplotlib.cm.ScalarMappable` functionality is optional.
If the `~matplotlib.cm.ScalarMappable` matrix ``_A`` has been set (via
a call to `~.ScalarMappable.set_array`), at draw time a call to scalar
mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.get_fill():
return patch.get_facecolor()
return [0, 0, 0, 0]
kwargs['facecolors'] = [determine_facecolor(p) for p in patches]
kwargs['edgecolors'] = [p.get_edgecolor() for p in patches]
kwargs['linewidths'] = [p.get_linewidth() for p in patches]
kwargs['linestyles'] = [p.get_linestyle() for p in patches]
kwargs['antialiaseds'] = [p.get_antialiased() for p in patches]
Collection.__init__(self, **kwargs)
self.set_paths(patches)
def set_paths(self, patches):
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
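# Illustrative usage sketch (not part of the library source): mapping a colormap
# onto a heterogeneous set of patches by setting the ScalarMappable array; the
# patch geometry and values are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from matplotlib.patches import Circle, Rectangle
#     patches = [Circle((0.3, 0.5), 0.1), Rectangle((0.5, 0.3), 0.3, 0.2)]
#     pc = PatchCollection(patches, cmap='plasma', alpha=0.8)
#     pc.set_array(np.array([1.0, 2.0]))   # one value per patch -> face colors
#     fig, ax = plt.subplots()
#     ax.add_collection(pc)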
class TriMesh(Collection):
"""
Class for the efficient drawing of a triangular mesh using Gouraud shading.
A triangular mesh is a `~matplotlib.tri.Triangulation` object.
"""
def __init__(self, triangulation, **kwargs):
Collection.__init__(self, **kwargs)
self._triangulation = triangulation
self._shading = 'gouraud'
self._is_filled = True
self._bbox = transforms.Bbox.unit()
# Unfortunately this requires a copy, unless Triangulation
# was rewritten.
xy = np.hstack((triangulation.x.reshape(-1, 1),
triangulation.y.reshape(-1, 1)))
self._bbox.update_from_data_xy(xy)
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(self._triangulation)
@staticmethod
def convert_mesh_to_paths(tri):
"""
Convert a given mesh into a sequence of `~.Path` objects.
This function is primarily of use to implementers of backends that do
not directly support meshes.
"""
triangles = tri.get_masked_triangles()
verts = np.stack((tri.x[triangles], tri.y[triangles]), axis=-1)
return [mpath.Path(x) for x in verts]
@artist.allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, gid=self.get_gid())
transform = self.get_transform()
# Get a list of triangles and the color at each vertex.
tri = self._triangulation
triangles = tri.get_masked_triangles()
verts = np.stack((tri.x[triangles], tri.y[triangles]), axis=-1)
self.update_scalarmappable()
colors = self._facecolors[triangles]
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
renderer.draw_gouraud_triangles(gc, verts, colors, transform.frozen())
gc.restore()
renderer.close_group(self.__class__.__name__)
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices.
The dimensions of this array are (*meshWidth* + 1, *meshHeight* + 1).
Each vertex in the mesh has a different set of "mesh coordinates"
representing its position in the topology of the mesh.
For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
(*meshHeight* + 1))) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the colors).
For example, the first entry in *coordinates* is the coordinates of the
vertex at mesh coordinates (0, 0), then the one at (0, 1), then at (0, 2)
.. (0, meshWidth), (1, 0), (1, 1), and so on.
*shading* may be 'flat', or 'gouraud'
"""
def __init__(self, meshWidth, meshHeight, coordinates,
antialiased=True, shading='flat', **kwargs):
Collection.__init__(self, **kwargs)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
# By converting to floats now, we can avoid that on every draw.
self._coordinates = np.asarray(coordinates, float).reshape(
(meshHeight + 1, meshWidth + 1, 2))
self._antialiased = antialiased
self._shading = shading
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
self.stale = True
def get_datalim(self, transData):
return (self.get_transform() - transData).transform_bbox(self._bbox)
@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Convert a given mesh into a sequence of `~.Path` objects.
This function is primarily of use to implementers of backends that do
not directly support quadmeshes.
"""
if isinstance(coordinates, np.ma.MaskedArray):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[:-1, :-1],
c[:-1, 1:],
c[1:, 1:],
c[1:, :-1],
c[:-1, :-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [mpath.Path(x) for x in points]
def convert_mesh_to_triangles(self, meshWidth, meshHeight, coordinates):
"""
Convert a given mesh into a sequence of triangles, each point
with its own color. This is useful for experiments using
`~.RendererBase.draw_gouraud_triangle`.
"""
if isinstance(coordinates, np.ma.MaskedArray):
p = coordinates.data
else:
p = coordinates
p_a = p[:-1, :-1]
p_b = p[:-1, 1:]
p_c = p[1:, 1:]
p_d = p[1:, :-1]
p_center = (p_a + p_b + p_c + p_d) / 4.0
triangles = np.concatenate((
p_a, p_b, p_center,
p_b, p_c, p_center,
p_c, p_d, p_center,
p_d, p_a, p_center,
), axis=2)
triangles = triangles.reshape((meshWidth * meshHeight * 4, 3, 2))
c = self.get_facecolor().reshape((meshHeight + 1, meshWidth + 1, 4))
c_a = c[:-1, :-1]
c_b = c[:-1, 1:]
c_c = c[1:, 1:]
c_d = c[1:, :-1]
c_center = (c_a + c_b + c_c + c_d) / 4.0
colors = np.concatenate((
c_a, c_b, c_center,
c_b, c_c, c_center,
c_c, c_d, c_center,
c_d, c_a, c_center,
), axis=2)
colors = colors.reshape((meshWidth * meshHeight * 4, 3, 4))
return triangles, colors
@artist.allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, self.get_gid())
transform = self.get_transform()
transOffset = self.get_offset_transform()
offsets = self._offsets
if self.have_units():
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:, 0])
ys = self.convert_yunits(self._offsets[:, 1])
offsets = np.column_stack([xs, ys])
self.update_scalarmappable()
if not transform.is_affine:
coordinates = self._coordinates.reshape((-1, 2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
if self._shading == 'gouraud':
triangles, colors = self.convert_mesh_to_triangles(
self._meshWidth, self._meshHeight, coordinates)
renderer.draw_gouraud_triangles(
gc, triangles, colors, transform.frozen())
else:
renderer.draw_quad_mesh(
gc, transform.frozen(), self._meshWidth, self._meshHeight,
coordinates, offsets, transOffset,
# Backends expect flattened rgba arrays (n*m, 4) for fc and ec
self.get_facecolor().reshape((-1, 4)),
self._antialiased, self.get_edgecolors().reshape((-1, 4)))
gc.restore()
renderer.close_group(self.__class__.__name__)
self.stale = False
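# Illustrative usage sketch (not part of the library source): QuadMesh objects
# are normally created indirectly by `Axes.pcolormesh`, which returns one; the
# grid and values below are made up.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     x, y = np.meshgrid(np.linspace(0, 1, 11), np.linspace(0, 1, 6))
#     z = np.random.rand(5, 10)        # (meshHeight, meshWidth) face values
#     fig, ax = plt.subplots()
#     qm = ax.pcolormesh(x, y, z)      # qm is a QuadMesh
#     qm.set_edgecolor('white')
#     fig.colorbar(qm, ax=ax)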
patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'TriMesh', 'PolyCollection', 'BrokenBarHCollection',
'RegularPolyCollection', 'PathCollection',
'StarPolygonCollection', 'PatchCollection',
'CircleCollection', 'Collection',):
docstring.interpd.update({k: patchstr})
docstring.interpd.update(LineCollection=artist.kwdoc(LineCollection))
| gpl-2.0 |
mitschabaude/nanopores | scripts/pughpore/randomwalk/create_plot_traj.py | 1 | 4699 | from matplotlib.ticker import FormatStrFormatter
import matplotlib
import nanopores as nano
import nanopores.geometries.pughpore as pughpore
from nanopores.models.pughpore import polygon
from nanopores.models.pughpoints import plot_polygon
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import nanopores.tools.fields as f
HOME = os.path.expanduser("~")
PAPERDIR = os.path.join(HOME, "papers", "paper-howorka")
FIGDIR = os.path.join(PAPERDIR, "figures", "")
DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
f.set_dir_mega()
up = nano.Params(pughpore.params, k=3)
hpore=up.hpore
l0 = up.l0
l1 = up.l1
l2 = up.l2
l3 = up.l3
l4 = up.l4
hpore = up.hpore
hmem = up.hmem
h2 = up.h2
h1 = up.h1
h4 = up.h4
def save_fig_traj(params,fieldsname,i,showtraj):
data=f.get_fields(fieldsname,**params)
b1 =data["b1"]
b2 =data["b2"]
if showtraj:
X = data["X"][i]
Y = data["Y"][i]
Z = data["Z"][i]
T = data["T"][i]
J = data["J"][i]
J=J.load()
T=T.load()
curr = 7.523849e-10
bind1 = np.where(T>1e6)[0]
bind2 = np.intersect1d(np.where(T<=1e6)[0],np.where(T>100.)[0])
amplitude = curr-np.inner(J,T)/np.sum(T)
for k in range(1,T.shape[0]):
T[k]=T[k]+T[k-1]
tau_off=T[-1]
J=J*1e12
figname = fieldsname+'_traj_'+'%.8f'%(tau_off*1e-6)+'_%04d'%i+'_%.1e_%.1e_%.1e_%.1e'%(params["avgbind1"],params["avgbind2"],params["P_bind1"],params["P_bind2"])+str(params["z0"])
else:
figname = fieldsname+'_bindzones'+'_%.1e_%.1e_%.1e_%.1e'%(params["avgbind1"],params["avgbind2"],params["P_bind1"],params["P_bind2"])+str(params["z0"])
if showtraj:
fig=plt.figure(figsize=(8,5),dpi=80)
else:
fig=plt.figure(figsize=(3,5),dpi=80)
color2='#ff0000'
color1='#ff9900'
color3='#00ff00'
#b1 = [[[l1/2.,17.],[l1/2.,19.]],[[l3/2.,-hpore/2.],[l3/2.,hpore/2.-h2],[l2/2.,hpore/2.-h2],[l2/2.,14.]]]
for seq in b1:
x= [p[0] for p in seq]
xm=[-p[0] for p in seq]
y= [p[1] for p in seq]
plt.plot(x,y,color=color1,linewidth=2.)
plt.plot(xm,y,color=color1,linewidth=2.)
#b2 = [[[l3/2.-.5,-3.],[l3/2.-.5,11.]]]
for seq in b2:
x= [p[0] for p in seq]
xm=[-p[0] for p in seq]
y= [p[1] for p in seq]
plt.plot(x,y,color=color2,linewidth=2.)
plt.plot(xm,y,color=color2,linewidth=2.)
if showtraj:
plt.plot(X,Z,linewidth=1.,c='#0000ff')
longer = plt.scatter(X[bind1],Z[bind1],s=200,marker='h',c=color2,linewidth=0.)
shorter = plt.scatter(X[bind2],Z[bind2],s=100,marker='h',c=color1,linewidth=0.)
start = plt.scatter([X[0]],[Z[0]],s=200,marker='x',c=color3,linewidth=2.)
patches=[start]
labels=['Start']
if showtraj and len(bind1)>0:
patches=patches+[longer]
labels+=['Longer bindings']
if showtraj and len(bind2)>0:
patches=patches+[shorter]
labels+=['Shorter bindings']
if showtraj:
plt.legend(patches,labels,scatterpoints=1,loc=(.42,.15))
ax=plt.gca()
ax.set_aspect('equal')
if showtraj:
ax.set_xlim([20.,-55.])
ax.set_ylim([-25.,40.])
else:
ax.set_xlim([20.,-20.])
ax.set_ylim([-25.,40.])
ax.set_xticks([])
ax.set_yticks([])
plt.axis('off')
plot_polygon(ax,polygon(rmem=60.))
if showtraj:
plt.axes([.55,.5,.2,.3])
plt.title('Current signal')
ax=plt.gca()
if tau_off<1e3:
t = np.linspace(0.,tau_off,3)
fac=1.
ax.set_xlabel('time [$ns$]')
elif tau_off<1e6 and tau_off>=1e3:
t = np.linspace(0.,tau_off*1e-3,3)
fac = 1e-3
ax.set_xlabel(r'time [$\mu s$]')
else:
t = np.linspace(0.,tau_off*1e-6,3)
fac = 1e-6
ax.set_xlabel('time [$ms$]')
T=T*fac
plt.plot(T,J,color='#000000')
yt = np.linspace(580.,760,4)
ax.set_ylabel(r'A [$pA$]')
ax.set_yticks(yt)
ax.set_xticks(t)
xfmt=FormatStrFormatter('%.1f')
ax.xaxis.set_major_formatter(xfmt)
ax.set_xlim([-4e-2*tau_off*fac,(1.+4e-2)*tau_off*fac])
plt.tight_layout()
# nano.savefigs(name=figname,DIR='/home/bstadlbau/plots/')
plt.show()
print 'savefig: %s'%figname
plt.close("all")
fieldsname='events_onlyone_2'
params=dict(avgbind1=2e7,avgbind2=3e4,P_bind1=8.e-2,P_bind2=0*3e-1,z0=hpore/2.+0.)
i=15
showtraj = True
save_fig_traj(params,fieldsname,i,showtraj) | mit |
imaculate/scikit-learn | examples/tree/unveil_tree_structure.py | 67 | 4824 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %ss else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
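# Side note (an assumption about the installed version): scikit-learn 0.21+ ships
# sklearn.tree.export_text, which prints an equivalent textual view of the tree.
# A minimal sketch, not required for the rest of this example:
#
#     from sklearn.tree import export_text
#     print(export_text(estimator, feature_names=list(iris.feature_names)))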
# First let's retrieve the decision path of each sample. The decision_path
# method returns a node indicator matrix: a non-zero element at position (i, j)
# indicates that sample i passes through node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # continue to the next node if it is a leaf node
    if leave_id[sample_id] == node_id:
        continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
             X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
numenta/htmresearch | projects/union_path_integration/plot_convergence.py | 4 | 11807 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot convergence chart."""
import collections
import json
import os
import matplotlib.pyplot as plt
import numpy as np
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
def chart():
if not os.path.exists(CHART_DIR):
os.makedirs(CHART_DIR)
# Convergence vs. number of objects, comparing # unique features
#
# Generated with:
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 --numUniqueFeatures 50 --locationModuleWidth 20 --resultName results/convergence_vs_num_objs_50_feats.json
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 --numUniqueFeatures 100 --locationModuleWidth 20 --resultName results/convergence_vs_num_objs_100_feats.json
# python convergence_simulation.py --numObjects 200 400 600 800 1000 1200 1400 1600 1800 2000 --numUniqueFeatures 5000 --locationModuleWidth 20 --resultName results/convergence_vs_num_objs_5000_feats.json
#plt.style.use("ggplot")
markers = ("s", "o", "^")
for feats, marker in zip((100, 200, 5000), markers):
with open("results/convergence_vs_num_objs_{}_feats.json".format(feats), "r") as f:
convVsObjects = json.load(f)
yData = collections.defaultdict(list)
for exp in convVsObjects:
numObjects = int(str(exp[0]["numObjects"]))
if "null" in exp[1]["convergence"].keys():
continue
results = exp[1]["convergence"].items()
total = 0
count = 0
for i, j in results:
total += (int(str(i)) * j)
count += j
y = float(total) / float(count)
yData[numObjects].append(y)
x = list(sorted(yData.keys()))
yData = sorted(yData.iteritems())
y = [float(sum(pair[1])) / float(len(pair[1]))
if None not in pair[1] else None
for pair in yData]
std = [np.std(pair[1])
for pair in yData]
yBelow = [yi - stdi
for yi, stdi in zip(y, std)]
yAbove = [yi + stdi
for yi, stdi in zip(y, std)]
xError = x[:len(yBelow)]
plt.plot(
x, y, "{}-".format(marker), label="{} unique features".format(feats),
)
#plt.fill_between(xError, yBelow, yAbove, alpha=0.3)
plt.xlabel("Number of Objects")
plt.xticks([(i+1)*200 for i in xrange(10)])
plt.ylabel("Average Number of Sensations")
plt.legend(loc="center right")
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "convergence_vs_objects_w_feats.pdf"))
plt.clf()
# Convergence vs. number of objects, varying module size
# NOT USED in Columns Plus
#
# Generated with:
# TODO
#plt.style.use("ggplot")
#for cpm in (25, 100, 400):
# with open("results/convergence_vs_num_objs_{}_cpm.json".format(cpm), "r") as f:
# convVsObjs = json.load(f)
# yData = collections.defaultdict(list)
# for exp in convVsObjs:
# results = exp[1]["convergence"].items()
# total = 0
# count = 0
# for i, j in results:
# total += (int(str(i)) * j)
# count += j
# y = float(total) / float(count)
# numObjects = int(str(exp[0]["numObjects"]))
# yData[numObjects].append(y)
# x = list(sorted(yData.keys()))
# yData = sorted(yData.iteritems())
# y = [float(sum(pair[1])) / float(len(pair[1])) for pair in yData]
# std = [np.std(pair[1]) for pair in yData]
# yBelow = [yi - stdi for yi, stdi in zip(y, std)]
# yAbove = [yi + stdi for yi, stdi in zip(y, std)]
# plt.plot(
# x, y, "o-", label="{} cells per module".format(cpm),
# )
# plt.fill_between(x, yBelow, yAbove, alpha=0.3)
#plt.xlabel("Number of Objects")
#plt.ylabel("Average Number of Sensations")
#plt.legend(loc="upper left")
#plt.tight_layout()
#plt.savefig(os.path.join(CHART_DIR, "convergence_with_modsize.pdf"))
#plt.clf()
# Convergence vs. number of modules
#
# Generated with:
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 100 --locationModuleWidth 5 --numModules 1 2 3 4 5 6 7 8 9 10 --resultName results/convergence_vs_num_modules_100_feats_25_cpm.json --repeat 10
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 100 --locationModuleWidth 10 --numModules 1 2 3 4 5 6 7 8 9 10 --resultName results/convergence_vs_num_modules_100_feats_100_cpm.json --repeat 10
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 100 --locationModuleWidth 20 --numModules 1 2 3 4 5 6 7 8 9 10 --resultName results/convergence_vs_num_modules_100_feats_400_cpm.json --repeat 10
#plt.style.use("ggplot")
markers = ("s", "o", "^")
for cpm, marker in zip((49, 100, 400), markers):
with open("results/convergence_vs_num_modules_100_feats_{}_cpm.json".format(cpm), "r") as f:
convVsMods100 = json.load(f)
yData = collections.defaultdict(list)
for exp in convVsMods100:
results = exp[1]["convergence"].items()
total = 0
count = 0
for i, j in results:
if str(i) == "null":
total = 50 * j
else:
total += (int(str(i)) * j)
count += j
y = float(total) / float(count)
numModules = int(str(exp[0]["numModules"]))
yData[numModules].append(y)
x = [i+1 for i in xrange(20)]
#y = [float(sum(pair[1])) / float(len(pair[1])) for pair in yData]
y = [float(sum(yData[step])) / float(len(yData[step])) for step in x]
#yData20 = yData[19][1]
#y20 = float(sum(yData20)) / float(len(yData20))
yData = sorted(yData.iteritems())
std = [np.std(pair[1]) for pair in yData]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "{}-".format(marker),
label="{} cells per module".format(cpm),
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# TODO: Update this to ideal?
plt.plot([1, 20], [2.022, 2.022], "r--", label="Ideal")
plt.xlabel("Number of Modules")
plt.ylabel("Average Number of Sensations")
plt.legend(loc="upper right")
plt.ylim((0.0, 7.0))
plt.xticks([(i+1)*2 for i in xrange(10)])
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "convergence_vs_modules_100_feats.pdf"))
plt.clf()
# Cumulative convergence
#
# Generated with:
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 10 --locationModuleWidth 20 --thresholds 18 --resultName results/cumulative_convergence_400_cpm_10_feats_100_objs.json --repeat 10
# python convergence_simulation.py --numObjects 100 --numUniqueFeatures 10 --locationModuleWidth 10 --thresholds 19 --resultName results/cumulative_convergence_100_cpm_10_feats_100_objs.json --repeat 10
# python ideal_sim.py
# python bof_sim.py
numSteps = 12
# 1600 CPM
yData = collections.defaultdict(list)
with open("results/cumulative_convergence_1600_cpm_10_feats_100_objs.json", "r") as f:
experiments = json.load(f)
for exp in experiments:
cum = 0
for i in xrange(40):
step = i + 1
count = exp[1]["convergence"].get(str(step), 0)
yData[step].append(count)
x = [i+1 for i in xrange(numSteps)]
y = []
tot = float(sum([sum(counts) for counts in yData.values()]))
cum = 0.0
for step in x:
counts = yData[step]
cum += float(sum(counts))
y.append(100.0 * cum / tot)
std = [np.std(yData[step]) for step in x]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "s-", label="1600 Cells Per Module",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# 400 CPM
yData = collections.defaultdict(list)
with open("results/cumulative_convergence_400_cpm_10_feats_100_objs.json", "r") as f:
experiments = json.load(f)
for exp in experiments:
cum = 0
for i in xrange(40):
step = i + 1
count = exp[1]["convergence"].get(str(step), 0)
yData[step].append(count)
x = [i+1 for i in xrange(numSteps)]
y = []
tot = float(sum([sum(counts) for counts in yData.values()]))
cum = 0.0
for step in x:
counts = yData[step]
cum += float(sum(counts))
y.append(100.0 * cum / tot)
std = [np.std(yData[step]) for step in x]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "o-", label="400 Cells Per Module",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
## 289 CPM
yData = collections.defaultdict(list)
with open("results/cumulative_convergence_289_cpm_10_feats_100_objs_1.json", "r") as f:
experiments = json.load(f)
for exp in experiments:
cum = 0
for i in xrange(40):
step = i + 1
count = exp[1]["convergence"].get(str(step), 0)
yData[step].append(count)
x = [i+1 for i in xrange(numSteps)]
y = []
tot = float(sum([sum(counts) for counts in yData.values()]))
cum = 0.0
for step in x:
counts = yData[step]
cum += float(sum(counts))
y.append(100.0 * cum / tot)
std = [np.std(yData[step]) for step in x]
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "^-", label="289 Cells Per Module",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# Ideal
with open("results/ideal.json", "r") as f:
idealResults = json.load(f)
x = [i+1 for i in xrange(numSteps)]
y = []
std = [np.std(idealResults.get(str(steps), [0])) for steps in x]
tot = float(sum([sum(counts) for counts in idealResults.values()]))
cum = 0.0
for steps in x:
counts = idealResults.get(str(steps), [])
if len(counts) > 0:
cum += float(sum(counts))
y.append(100.0 * cum / tot)
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "x--", label="Ideal Observer",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
# BOF
with open("results/bof.json", "r") as f:
bofResults = json.load(f)
x = [i+1 for i in xrange(numSteps)]
y = []
std = [np.std(bofResults.get(str(steps), [0])) for steps in x]
tot = float(sum([sum(counts) for counts in bofResults.values()]))
cum = 0.0
for steps in x:
counts = bofResults.get(str(steps), [])
if len(counts) > 0:
cum += float(sum(counts))
y.append(100.0 * cum / tot)
yBelow = [yi - stdi for yi, stdi in zip(y, std)]
yAbove = [yi + stdi for yi, stdi in zip(y, std)]
plt.plot(
x, y, "d--", label="Bag of Features",
)
#plt.fill_between(x, yBelow, yAbove, alpha=0.3)
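  # The three cumulative-convergence blocks above repeat the same accumulation.
  # A helper along these lines (a sketch only; not used here) would remove the
  # duplication for a given results file:
  #
  # def cumulativeCurve(resultsPath, numSteps):
  #   yData = collections.defaultdict(list)
  #   with open(resultsPath, "r") as f:
  #     for exp in json.load(f):
  #       for i in xrange(40):
  #         yData[i + 1].append(exp[1]["convergence"].get(str(i + 1), 0))
  #   tot = float(sum(sum(counts) for counts in yData.values()))
  #   x = [i + 1 for i in xrange(numSteps)]
  #   y = []
  #   cum = 0.0
  #   for step in x:
  #     cum += float(sum(yData[step]))
  #     y.append(100.0 * cum / tot)
  #   return x, y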
# Formatting
plt.xlabel("Number of Sensations")
plt.ylabel("Cumulative Accuracy")
plt.legend(loc="center right")
plt.xticks([(i+1)*2 for i in xrange(6)])
plt.tight_layout()
plt.savefig(os.path.join(CHART_DIR, "cumulative_accuracy.pdf"))
plt.clf()
if __name__ == "__main__":
chart()
| agpl-3.0 |
patemotter/trilinos-prediction | ml_files/preprocess_properties_data.py | 1 | 1859 | # Written using Anaconda with Python 3.5
# Pate Motter
# 1-19-17
# Input:
#   The properties file has many columns of features computed using Anamod.
#   The file used here is trilinos-prediction/data/uflorida-features.csv.
import pandas as pd
import numpy as np
matrix_properties = pd.read_csv('../data/uflorida-features.csv', header=0)
matrix_properties.columns = ['rows', 'cols', 'min_nnz_row', 'row_var', 'col_var',
'diag_var', 'nnz', 'frob_norm', 'symm_frob_norm',
'antisymm_frob_norm', 'one_norm', 'inf_norm', 'symm_inf_norm',
'antisymm_inf_norm', 'max_nnz_row', 'trace', 'abs_trace',
'min_nnz_row', 'avg_nnz_row', 'dummy_rows', 'dummy_rows_kind',
'num_value_symm_1', 'nnz_pattern_symm_1', 'num_value_symm_2',
'nnz_pattern_symm_2', 'row_diag_dom', 'col_diag_dom', 'diag_avg',
'diag_sign', 'diag_nnz', 'lower_bw', 'upper_bw', 'row_log_val_spread',
'col_log_val_spread', 'symm', 'matrix']
# Create hash id's for matrix names
hash_dict = {}
matrix_names = matrix_properties['matrix'].unique()
for name in matrix_names:
hash_dict[name] = hash(name)
hash_list = []
matrix_name_series = matrix_properties['matrix']
for name in matrix_name_series:
hash_list.append(hash_dict[name])
matrix_id_series = pd.Series(hash_list)
matrix_properties = matrix_properties.assign(matrix_id=pd.Series(matrix_id_series))
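# Note: Python's built-in hash() for str is salted per interpreter session
# (PYTHONHASHSEED), so the matrix ids above are not reproducible across runs.
# A deterministic alternative (a sketch only; not wired into the columns above):
import hashlib

def stable_matrix_id(name):
    # md5 digest of the matrix name, truncated to a positive 63-bit integer
    return int(hashlib.md5(name.encode('utf-8')).hexdigest(), 16) % (2 ** 63)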
# Downcast any float64 columns to float32 and clean NaN/inf values with
# nan_to_num; data beyond float32 will break some sklearn algos :(
for col in matrix_properties:
if matrix_properties[col].values.dtype == np.float64:
matrix_properties[col] = matrix_properties[col].astype(np.float32)
matrix_properties[col] = np.nan_to_num(matrix_properties[col])
matrix_properties.to_csv('processed_properties.csv')
| mit |
lucidjuvenal/quis-custodiet | twitter_feed/twittest.py | 1 | 1922 | import twitter # python-twitter package
from matplotlib.pyplot import pause
import re
############################################
# secret data kept in separate file
with open('twitdat.txt') as f:
fromFile = {}
for line in f:
line = line.split() # to skip blank lines
if len(line)==3 : #
fromFile[line[0]] = line[2]
#print fromFile
api = twitter.Api(
consumer_key = fromFile['consumer_key'],
consumer_secret = fromFile['consumer_secret'],
access_token_key = fromFile['access_token_key'],
access_token_secret = fromFile['access_token_secret']
)
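# Optional sanity check (assumes the keys above are valid and network access is
# available): python-twitter's VerifyCredentials() returns the authenticated
# user, or raises twitter.TwitterError on bad credentials.
# print api.VerifyCredentials().screen_name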
# https://twitter.com/gov/status/743263851366449152
tweetID = 743263851366449152
# https://twitter.com/BBCMOTD/status/744216695976255492
tweetID = 744216695976255492
# https://twitter.com/BBCMOTD/status/744281250924474368
tweetID = 744281250924474368
try:
tweet = api.GetStatus(status_id = tweetID)
except (twitter.TwitterError, IOError):  # ConnectionError is Python 3-only; catch API/network errors instead
print "should have a backup here"
candidates = ['goodguy', 'evilguy']
tags = ['precinct','ballotbox']
tags.extend(candidates)
tags = set(tags)
def getVotes(tweet,tags):
'''
tweet is the Status object from the python-twitter api.
tags is a set of strings
currently returns correct data for well-formatted tweet text
need to include checks for multiple numbers/candidates per line, give error
'''
data = {}
lines = re.split('[,;\n]', tweet.text.lower())
for line in lines:
if '#' not in line: # Ignore hashtags
for tag in tags:
if tag in line:
data[tag] = int(re.search(r'\d+', line).group())
return data
def testMsgs(tweet, msgs):
for msg in msgs:
tweet.text = msg
def subTweet(tweet,msgID=0):
t1 = "Goodguy 57 votes!\nEvilguy 100\n#Hashtest"
t2 = "57 Goodguy\n100 Evilguy\n#Hashtest"
t3 = "goodguy 57 evilguy 100"
msgs = [ [], t1, t2, t3 ]
tweet.text = msgs[msgID]
return tweet
tweet = subTweet(tweet, 3)
print getVotes(tweet, tags)
| gpl-3.0 |