repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
henrykironde/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
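# A minimal usage sketch, not part of the original benchmark: factor a small
# random non-negative matrix with alt_nnmf and report the Frobenius
# reconstruction error. The matrix size, rank and tolerance below are
# arbitrary illustrative values.
def _demo_alt_nnmf():
    rng = np.random.RandomState(0)
    V = np.abs(rng.standard_normal((20, 15)))
    W, H = alt_nnmf(V, r=5, max_iter=200, tol=1e-3)
    return norm(V - np.dot(W, H))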
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
LLNL/spack | var/spack/repos/builtin/packages/py-workload-automation/package.py | 4 | 2174 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWorkloadAutomation(PythonPackage):
"""Workload Automation (WA) is a framework for executing workloads and
collecting measurements on Android and Linux devices."""
homepage = "https://github.com/ARM-software/workload-automation"
url = "https://github.com/ARM-software/workload-automation/archive/v3.2.tar.gz"
version('3.2', sha256='a3db9df6a9e0394231560ebe6ba491a513f6309e096eaed3db6f4cb924c393ea')
version('3.1.4', sha256='217fc33a3739d011a086315ef86b90cf332c16d1b03c9dcd60d58c9fd1f37f98')
version('3.1.3', sha256='152470808cf8dad8a833fd7b2cb7d77cf8aa5d1af404e37fa0a4ff3b07b925b2')
version('3.1.2', sha256='8226a6abc5cbd96e3f1fd6df02891237a06cdddb8b1cc8916f255fcde20d3069')
version('3.1.1', sha256='32a19be92e43439637c68d9146f21bb7a0ae7b8652c11dfc4b4bd66d59329ad4')
version('3.1.0', sha256='f00aeef7a1412144c4139c23b4c48583880ba2147207646d96359f1d295d6ac3')
version('3.0.0', sha256='8564b0c67541e3a212363403ee090dfff5e4df85770959a133c0979445b51c3c')
version('2.7.0', sha256='e9005b9db18e205bf6c4b3e09b15a118abeede73700897427565340dcd589fbb')
version('2.6.0', sha256='b94341fb067592cebe0db69fcf7c00c82f96b4eb7c7210e34b38473869824cce')
depends_on('py-setuptools', type='build')
depends_on('py-python-dateutil', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pyserial', type=('build', 'run'))
depends_on('py-colorama', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
depends_on('py-wrapt', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='^[email protected]:')
depends_on('[email protected]:0.24.2', type=('build', 'run'), when='^python@:3.5.2')
depends_on('py-future', type=('build', 'run'))
depends_on('py-louie', type=('build', 'run'))
depends_on('py-devlib', type=('build', 'run'))
| lgpl-2.1 |
bmazin/SDR | Projects/ChannelizerSim/channelizerSim.py | 1 | 13632 | import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
from util.popup import pop #from ARCONS-pipeline
def tone(freq,nSamples,sampleRate,initialPhase=0.):
dt = 1/sampleRate
time = np.arange(0,nSamples)*dt
out = np.exp(1.j*(2*np.pi*freq*time+initialPhase))
#out = np.cos(2*np.pi*freq*time)
return out
def toneWithPhasePulse(freq,nSamples,sampleRate=512.e6,pulseAmpDeg=-35.,decayTime=30.e-6,riseTime=.5e-6,pulseArrivalTime=50.e-6,initialPhase=0.):
dt = 1./sampleRate
time = np.arange(0,nSamples)*dt
pulseArrivalIndex = int(pulseArrivalTime//dt)
pulseAmp = np.pi/180.*pulseAmpDeg
pulse = np.exp(-time[0:nSamples-pulseArrivalIndex]/decayTime)
rise = np.exp(-time[0:pulseArrivalIndex]/riseTime)[::-1]
paddedPulse = np.append(rise,pulse)
phase = pulseAmp*paddedPulse
out = np.exp(1.j*(2*np.pi*freq*time+phase+initialPhase))
return out
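# Sketch (not in the original script): demodulate the carrier from a tone with a
# simulated phase pulse and recover the injected pulse amplitude. The carrier
# frequency, sample count and sample rate below are arbitrary.
def _demoPhasePulse():
    fDemo, nDemo, fsDemo = 10.e6, 2**16, 512.e6
    sig = toneWithPhasePulse(fDemo, nDemo, fsDemo, pulseAmpDeg=-35.)
    tDemo = np.arange(nDemo)/fsDemo
    basebandPhase = np.angle(sig*np.exp(-2.j*np.pi*fDemo*tDemo), deg=True)
    return basebandPhase.min() # approximately -35 degrees, the injected pulseAmpDeg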
def pfb(x,nTaps=4,fftSize=512,binWidthScale=2.5,windowType='hanning'):
windowSize = nTaps*fftSize
angles = np.arange(-nTaps/2.,nTaps/2.,1./fftSize)
window = np.sinc(binWidthScale*angles)
if windowType == 'hanning':
window *= np.hanning(windowSize)
nFftChunks = len(x)//fftSize
#it takes a windowSize worth of x values to get the first out value, so there will be windowsize fewer values in out, assuming x divides evenly by fftSize
out = np.zeros(nFftChunks*fftSize-windowSize,np.complex128)
for i in xrange(0,len(out),fftSize):
windowedChunk = x[i:i+windowSize]*window
out[i:i+fftSize] = windowedChunk.reshape(nTaps,fftSize).sum(axis=0)
return out
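# Shape-check sketch (not part of the simulation): the PFB output is one
# window (nTaps*fftSize samples) shorter than its input, as noted in the
# comment inside pfb(). The tone parameters below are arbitrary.
def _demoPfbLength(nTapsDemo=4, fftSizeDemo=256):
    x = tone(1.e6, 8*nTapsDemo*fftSizeDemo, 512.e6)
    y = pfb(x, nTaps=nTapsDemo, fftSize=fftSizeDemo)
    return len(y) == len(x) - nTapsDemo*fftSizeDemo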
def dftBin(k,k0,m,N=512):
return np.exp(1.j*m*np.pi*k0)*(1-np.exp(1.j*2*np.pi*k0))/(1-np.exp(1.j*2*np.pi/N*(k0-k)))
def dftSingle(signal,n,freqIndex):
indices = np.arange(0,n)
signalCropped = signal[0:n]
dft = np.sum(signalCropped*np.exp(-2*np.pi*1.j*indices*freqIndex/n))
return dft
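# Consistency sketch (not in the original): for an integer frequency index,
# dftSingle should agree with the corresponding numpy FFT bin. Sizes are
# arbitrary.
def _checkDftSingle(nCheck=64, kCheck=5):
    x = np.random.randn(nCheck) + 1.j*np.random.randn(nCheck)
    return np.allclose(dftSingle(x, nCheck, kCheck), np.fft.fft(x[:nCheck])[kCheck])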
instrument = 'darkness'#'darkness'
if instrument == 'arcons':
fftSize=512
nPfbTaps=4
sampleRate = 512.e6
clockRate = 256.e6
bFlipSigns = True
lpfCutoff = 250.e3# 125 kHz in old firmware, should probably be 250 kHz
nLpfTaps = 20
binOversamplingRate = 2. #number of samples from the same bin used in a clock cycle
downsample = 2
elif instrument == 'darkness':
fftSize=2**12#2048 #2**11
nPfbTaps=4
sampleRate = 1.8e9
clockRate = 225.e6
bFlipSigns = True
lpfCutoff = 250.e3# 125 kHz in old firmware, should probably be 250 kHz
nLpfTaps = 20
binOversamplingRate = 2. #number of samples from the same bin used in a clock cycle
downsample = 1
nSimultaneousInputs = sampleRate/clockRate #number of consecutive samples (from consecutive channels) used in a clock cycle
binSpacing = sampleRate/fftSize #space between fft bin centers
binSampleRate = binOversamplingRate*nSimultaneousInputs*clockRate/fftSize #each fft bin is sampled at this rate
print 'instrument: ',instrument
print nSimultaneousInputs,' consecutive inputs'
print binSpacing/1.e6, 'MHz between bin centers'
print binSampleRate/1.e6, 'MHz sampling of fft bin'
lpf = scipy.signal.firwin(nLpfTaps, cutoff=lpfCutoff, nyq=binSampleRate/2.,window='hanning')
#Define the resonant frequency of interest
resFreq = 10.99e6 #[Hz]
binIndex = round(resFreq/binSpacing) #nearest FFT freq bin to resFreq
binFreq = binIndex*binSpacing #center frequency of bin at binIndex
#Define some additional nearby resonant frequencies. These should be filtered out of the channel of interest.
resSpacing = .501e6 #250 kHz minimum space between resonators
resFreq2 = resFreq-resSpacing#[Hz] Resonator frequency
resFreq3 = resFreq+resSpacing#[Hz] Resonator frequency
resFreqIndex = fftSize*resFreq/sampleRate
resFreqIndex2 = fftSize*resFreq2/sampleRate
resFreqIndex3 = fftSize*resFreq3/sampleRate
print 'nearest-k',binIndex
print 'freq',resFreq/1.e6,'MHz k0',resFreqIndex
print 'freq2',resFreq2/1.e6,'MHz k0',resFreqIndex2
print 'freq3',resFreq3/1.e6,'MHz k0',resFreqIndex3
nSamples = 4*nPfbTaps*fftSize
#Simulate the frequency response for a range of frequencies
freqStep = 0.01e6
freqResponseBandwidth = 8.e6
#1st stage, apply pfb and fft
freqList = np.arange(binFreq-freqResponseBandwidth/2, binFreq+freqResponseBandwidth/2, freqStep)
nFreq = len(freqList)
freqResponseFft = np.zeros(nFreq,dtype=np.complex128)
freqResponsePfb = np.zeros(nFreq,dtype=np.complex128)
#sweep through frequencies
for iFreq,freq in enumerate(freqList):
signal = tone(freq,nSamples,sampleRate)
fft = (np.fft.fft(signal,fftSize))
fft /= fftSize
fftBin = fft[binIndex]
freqResponseFft[iFreq] = fftBin
fft = (np.fft.fft(pfb(signal,fftSize=fftSize,nTaps=nPfbTaps),fftSize))
fft /= fftSize
fftBin = fft[binIndex]
freqResponsePfb[iFreq] = fftBin
freqResponseFftDb = 10*np.log10(np.abs(freqResponseFft))
freqResponsePfbDb = 10*np.log10(np.abs(freqResponsePfb))
#2nd stage, shift res freq to zero and apply low pass filter
normFreqStep = freqStep*2*np.pi/binSampleRate
#binBandNormFreqs = np.arange(-np.pi-10*normFreqStep,np.pi,normFreqStep)
binBandNormFreqs = (freqList-resFreq)*2*np.pi/binSampleRate # frequencies in bandwidth of a fft bin[rad/sample]
_,freqResponseLpf = scipy.signal.freqz(lpf, worN=binBandNormFreqs)
freqResponseLpfDb = 10*np.log10(np.abs(freqResponseLpf))
binBandFreqs = resFreq+binBandNormFreqs*binSampleRate/(2.*np.pi)
freqResponse = freqResponseLpf*freqResponsePfb
phaseResponse = np.rad2deg(np.unwrap(np.angle(freqResponse)))
phaseResponseLpf = np.rad2deg(np.unwrap(np.angle(freqResponseLpf)))
phaseResponsePfb = np.rad2deg(np.unwrap(np.angle(freqResponsePfb)))
#phaseResponse = np.angle(freqResponse,deg=True)
#phaseResponseLpf = np.angle(freqResponseLpf,deg=True)
#phaseResponsePfb = np.angle(freqResponsePfb,deg=True)
freqResponseDb = 10.*np.log10(np.abs(freqResponse))
freqListMHz = freqList/1.e6
#Plot Response vs Frequency for 1st and 2nd stages
def f(fig,ax):
ax.plot(freqListMHz,freqResponsePfbDb,label='FFT bin')
#ax.plot(freqListMHz,freqResponseFftDb)
#ax.plot(freqListMHz,freqResponseLpfDb)
ax.plot(freqListMHz,freqResponseDb,label='final channel')
ax.axvline(resFreq/1.e6,color='.5',label='resonant freq')
ax.axvline(resFreq2/1.e6,color='.8')
ax.axvline(resFreq3/1.e6,color='.8')
ax.set_ylim(-50,0)
ax.set_ylabel('response (dB)')
ax.set_xlabel('frequency (MHz)')
ax.legend(loc='lower left')
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
#Plot Response vs Phase for 1st and 2nd stages
def f(fig,ax):
ax.plot(freqListMHz,phaseResponse,label='overall')
ax.plot(freqListMHz,phaseResponseLpf,label='LPF')
ax.plot(freqListMHz,phaseResponsePfb,label='FFT')
ax.axvline(resFreq/1.e6,color='.5',label='resonant freq')
ax.axvline(resFreq2/1.e6,color='.8')
ax.axvline(resFreq3/1.e6,color='.8')
ax.set_ylabel('phase response ($^\circ$)')
ax.set_xlabel('frequency (MHz)')
ax.legend(loc='best')
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
###################################################################
#Now we'll simulate the full channelizer with a probe tone at the resonant frequency
nSamples = 100*nPfbTaps*fftSize
#signal = tone(resFreq,nSamples,sampleRate)
signal = toneWithPhasePulse(resFreq,nSamples,sampleRate)
#add in other tones
signal += toneWithPhasePulse(resFreq2,nSamples,sampleRate,pulseAmpDeg=-90.,pulseArrivalTime=200.e-6,initialPhase=2.)
signal += toneWithPhasePulse(resFreq3,nSamples,sampleRate,pulseAmpDeg=-80.,pulseArrivalTime=300.e-6,initialPhase=1.)
#signal += tone(resFreq3,nSamples,sampleRate,initialPhase=1.)
time = np.arange(nSamples)/sampleRate/1.e-6 #in us
#signal = tone(resFreq,nSamples,sampleRate)+tone(10.e6,nSamples,sampleRate)
#Plot raw signal vs time coming in from ADC
def f(fig,ax):
ax.plot(time,np.angle(signal),'r')
ax.plot(time,np.abs(signal),'b')
ax.set_xlabel('Time (us)')
ax.set_title('Signal with Phase Pulse')
#pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
rawFftSize=100000
rawFft = 10*np.log10(np.fft.fftshift(np.abs(np.fft.fft(signal,n=rawFftSize)/rawFftSize)))
rawFreqs = sampleRate*np.fft.fftshift(np.fft.fftfreq(rawFftSize))/1.e6
#plot fft of raw signal
def f(fig,ax):
ax.plot(rawFreqs,rawFft)
ax.set_xlabel('Freq (MHz)')
ax.set_ylabel('Amp (dB)')
ax.set_title('Frequency Content of\nraw signal')
ax.set_xlim((resFreq-2.*resSpacing)/1.e6,(resFreq+2.*resSpacing)/1.e6)
ax.set_ylim(-35,5)
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
filteredSignal0 = pfb(signal[:-fftSize/2],fftSize=fftSize,nTaps=nPfbTaps)
filteredSignal1 = pfb(signal[fftSize/2:],fftSize=fftSize,nTaps=nPfbTaps)
#trim off end if the length doesn't evenly divide by fftSize
nFfts = len(filteredSignal0)//fftSize
filteredSignal0 = filteredSignal0[:nFfts*fftSize]
filteredSignal1 = filteredSignal1[:nFfts*fftSize]
foldedSignal0 = np.reshape(filteredSignal0,(nFfts,fftSize))
foldedSignal1 = np.reshape(filteredSignal1,(nFfts,fftSize))
ffts0 = np.fft.fft(foldedSignal0,n=fftSize,axis=1) #each row will be the fft of a row in foldedSignal
ffts1 = np.fft.fft(foldedSignal1,n=fftSize,axis=1)
#flip signs for odd frequencies in every other bin sample
if bFlipSigns:
ffts1[:,1::2] *= -1
binTimestream0 = ffts0[:,binIndex] #pick out values for the bin corresponding to resFreq
binTimestream1 = ffts1[:,binIndex]
#binTimestream = binTimestream0
binTimestream = np.hstack(zip(binTimestream0,binTimestream1))#interleave timestreams
time = time[:len(filteredSignal0):int(fftSize/binOversamplingRate)]
#Plot signal vs time coming out of FFT bin
def f(fig,ax):
ax.plot(time,np.real(binTimestream),color='b')
ax.plot(time,np.imag(binTimestream),color='r')
ax.set_xlabel('Time (us)')
ax.set_title('After 1st Stage')
#pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
binFftSize=100 #Now for checking shifted frequencies in a bin
binFft = 10*np.log10(np.fft.fftshift(np.abs(np.fft.fft(binTimestream,n=binFftSize)/binFftSize)))
binFreqs = (binFreq+np.fft.fftshift(np.fft.fftfreq(binFftSize))*binSampleRate)/1.e6#MHz
#plot fft of signal coming out of FFT bin with frequencies shifted to show the resonant frequency
def f(fig,ax):
ax.plot(binFreqs,binFft)
ax.set_xlabel('Freq (MHz)')
ax.set_ylabel('Amp (dB)')
ax.set_title('Frequency Content of\nsampled FFT bin')
#pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
binFreqs = np.fft.fftshift(np.fft.fftfreq(binFftSize))
#plot fft of signal coming out of FFT bin
def f(fig,ax):
ax.plot(binFreqs,binFft)
ax.set_xlabel('Freq/$f_s$')
ax.set_ylabel('Amp (dB)')
ax.set_title('Frequency Content of\nsampled FFT bin')
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
ddsTimestream = tone(freq=binFreq-resFreq,nSamples=len(binTimestream),sampleRate=binSampleRate)
channelTimestream = binTimestream*ddsTimestream
def f(fig,ax):
ax.plot(time,np.abs(channelTimestream),color='b')
ax.plot(time,np.angle(channelTimestream,deg=True),color='r')
ax.set_title('After DDS')
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
channelFftSize=100 #Now for checking shifted frequencies in a bin
channelFft = 10*np.log10(np.fft.fftshift(np.abs(np.fft.fft(channelTimestream,n=channelFftSize)/channelFftSize)))
channelFreqs = (resFreq+np.fft.fftshift(np.fft.fftfreq(channelFftSize))*binSampleRate)/1.e6#MHz
#plot fft of signal coming out of DDS mixer with frequencies shifted to show the resonant frequency
def f(fig,ax):
ax.plot(channelFreqs,channelFft)
ax.set_xlabel('Freq (MHz)')
ax.set_ylabel('Amp (dB)')
ax.set_title('Frequency Content of\nDDS mixer output')
#pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
channelFreqs = np.fft.fftshift(np.fft.fftfreq(channelFftSize))
#plot fft of signal coming out of DDS mixer
def f(fig,ax):
ax.plot(channelFreqs,channelFft)
ax.set_xlabel('Freq/$f_s$')
ax.set_ylabel('Amp (dB)')
ax.set_title('Frequency Content of\nDDS mixer output')
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
channelTimestream = np.convolve(channelTimestream,lpf,mode='same')
channelTimestream = channelTimestream[::downsample]
time=time[::downsample]
finalFftSize=200 #Now for checking shifted frequencies in a bin
finalFft = 10*np.log10(np.fft.fftshift(np.abs(np.fft.fft(channelTimestream,n=finalFftSize)/finalFftSize)))
finalFreqs = (resFreq+np.fft.fftshift(np.fft.fftfreq(finalFftSize))*binSampleRate)/1.e6#MHz
#plot fft of signal coming out of DDS mixer with frequencies shifted to show the resonant frequency
def f(fig,ax):
ax.plot(finalFreqs,finalFft)
ax.set_xlabel('Freq (MHz)')
ax.set_ylabel('Amp (dB)')
ax.set_title('Frequency Content of\nLPF output')
#pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
finalFreqs = np.fft.fftshift(np.fft.fftfreq(finalFftSize))
#plot fft of signal coming out of DDS mixer
def f(fig,ax):
ax.plot(finalFreqs,finalFft)
ax.set_xlabel('Freq/$f_s$')
ax.set_ylabel('Amp (dB)')
ax.set_title('Frequency Content of\nLPF output')
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
phase = np.angle(channelTimestream,deg=True)[1:-1]
time = time[1:-1]
phaseOffset = phase[0]
phase -= phaseOffset #in readout, done by rotating IQ loops in templar
customFIR = np.loadtxt('matched_30us.txt')[::-1]
filteredPhase = np.convolve(phase,customFIR,mode='same')
def f(fig,ax):
#ax.plot(time,np.abs(channelTimestream),color='b')
ax.plot(time,phase,color='r',marker='.')
ax.plot(time,filteredPhase,color='m',marker='.')
ax.set_xlabel('Time (us)')
ax.set_ylabel('Phase ($^{\circ}$)')
ax.set_title('Output Phase')
pop(plotFunc=lambda fig,axes: f(fig=fig,ax=axes))
plt.show()
| gpl-2.0 |
shayanb/SEC.gov-form-retriever | parser_nsar.py | 1 | 5676 | #!/usr/bin/env python
import sys
import re
import csv
import os.path
from itertools import izip
import shutil
import pandas as pd
#TODO:
# Read the input file via sysarg or path crawling --> include everything in pathrun()
# Change the hardcodename argument
# FIX: Q 8 - 23
# Joining all the funds to one csv
# DONE! series i ! remove the %i from the name of the column
#
hardcodename = "filename" # hard coded name for each fund in each csv - later on this should be changed to a part of the original csv file
def txt2csv(txtfile, csvfilename):
'''
converts the txt files to csv files with [Key,Value] structure
'''
colonseperated = re.compile(' *(.+) *: *(.+) *')
fixedfields = re.compile('(\d{3} +\w{6,7}) +(.*)')
series = re.compile('<(\w{,}.*)>(\w{,}.*)')
matchers = [colonseperated, fixedfields, series]
outfile = csv.writer(open(csvfilename +'.csv', 'w'))
outfile.writerow(['Key', 'Value'])
for line in open(txtfile):
line = line.strip()
for matcher in matchers:
match = matcher.match(line)
if match and list(match.groups())[1] not in ('', ' ') and (list(match.groups())[0] not in ('PAGE')):
outfile.writerow(list(match.groups()))
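# Illustrative sketch, not part of the retriever: sample lines (values made up)
# showing what each of the three matchers in txt2csv() captures.
def _demo_matchers():
    samples = ['Registrant Name: EXAMPLE FUND TRUST', # colonseperated
               '007  A000000   Y', # fixedfields
               '<SERIES-NAME>Example Growth Fund'] # series
    colonseperated = re.compile(' *(.+) *: *(.+) *')
    fixedfields = re.compile('(\d{3} +\w{6,7}) +(.*)')
    series = re.compile('<(\w{,}.*)>(\w{,}.*)')
    return [m.match(line).groups() for m, line in zip([colonseperated, fixedfields, series], samples)]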
def pathrun():
'''
starts crawling on the ciks folders, opens the txt files and send them to txt2csv
then runs seriesExtract to extract the series
'''
path = os.getcwd()
path = (path+"/NSAR")
ciks = os.listdir(path)
for cik in ciks:
if cik.isdigit():
years = os.listdir(path + "/" + cik)
for i in years:
os.chdir(path + "/" + cik + "/" + str(i))
qtrs = os.listdir(".")
for j in qtrs:
os.chdir(path+"/"+cik+ "/" +str(i)+"/"+j)
print os.listdir(".")
for nsarfile in os.listdir("."):
if nsarfile.endswith(".txt"):
print str(nsarfile)
txt2csv(nsarfile,cik+'-'+i+'-'+j)
## seriesExtract(cik+'-'+i+'-'+j+'.csv') #UNCOMMENT this when series extract is done
csvfile="/Users/sbeta/Desktop/NSARFILES/0000910472-08-000530.csv"
def haveSeries(csvfile):
'''
check if the csvfile has any series (funds); returns the line number of 007 A000000 and the number of series
'''
with open(csvfile, 'rb') as f:
reader = csv.reader(f)
row2 = 0
for row in reader:
if (row[0] == '007 A000000') and (row[1]=='Y'):
baseline = reader.line_num
print baseline
seriesnum = reader.next()
seriesnum1 = seriesnum[1]
print seriesnum1
return baseline,seriesnum1
# else:
# print "NO Funds"
# return 0, 0
def transpose(csvfile):
'''
transpose the rows and columns of csv file
'''
a = izip(*csv.reader(open(csvfile, "rb")))
filename = os.path.splitext(os.path.basename(csvfile))[0]
csv.writer(open(filename + "_T.csv", "wb")).writerows(a)
def mergeCsv(filename1,seriesnum):
"""
goes through all the fund classes and merges the csvs together on inner join
"""
result = pd.read_csv(filename1 + "_1.csv")
for i in xrange(2,int(seriesnum)): #GO WITH APPEND!
result = result.append(pd.read_csv(filename1 + "_" + str(i) + ".csv"))
# eachfile = pd.read_csv(filename1 + "_" + str(i) + ".csv")
#merged = firstfile.merge(eachfile, left_index=True, right_index=True, how='outer')
#merged.to_csv("result.csv", index=False, na_rep='NA')
result.to_csv("result.csv")
#firstfile = pd.read_csv("result.csv").reindex()
#firstfile.reindex(index="Key")
def writeInCSV(filename,key,value):
'''
Hacky code to make/write into a csvfile
'''
outfile = csv.writer(open(filename +'.csv', 'a'))
outfile.writerow([key, value])
def seriesExtract(csvfile):
'''
extract fund/series details
'''
with open(csvfile, 'rb') as f:
row2 = 0
reader = csv.reader(f)
if haveSeries(csvfile) != False:
seriesBaseNNum=haveSeries(csvfile)
for row in reader: # this part writes all the lines from line number 1 to right before question 7 repeatedly to the quantity of funds
if reader.line_num < seriesBaseNNum[0]:
writeInCSV(hardcodename + "_1", row[0], row[1])
for i in xrange(2,int(seriesBaseNNum[1])+1): # hacky code to copy the file to the number of funds
shutil.copyfile(hardcodename + "_1.csv", hardcodename + "_" + str(i) + ".csv")
f.seek(0)
#reader2 = csv.reader(f)
for row in reader:
for i in xrange(1,int(seriesBaseNNum[1])):
seriesi = re.compile ("(?P<qno>\d{3})(?P<qsec>( \w| )\d{2})%02d(?P<qlast>\d{2}.{,})" %i) # separate the question number and fund number
matchers = seriesi.search(row[0])
if matchers:
print matchers.group("qno")+ matchers.group("qsec")+matchers.group("qlast")
writeInCSV(hardcodename + "_" + str(i), matchers.group("qno")+ matchers.group("qsec")+matchers.group("qlast"),row[1]) # removes the fund number from the question number
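# Worked sketch (not called anywhere): for fund number i=1 the matcher above
# strips the two-digit fund number from a question key. The key below is made up.
def _demo_seriesi(i=1):
    seriesi = re.compile("(?P<qno>\d{3})(?P<qsec>( \w| )\d{2})%02d(?P<qlast>\d{2}.{,})" %i)
    m = seriesi.search('062 A010100')
    return m.group("qno") + m.group("qsec") + m.group("qlast") # '062 A0100'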
#just run pathrun() ... wait for it... TADA!
#pathrun()
#seriesExtract(csvfile)
#for i in xrange(1,16):
# transpose(hardcodename + "_" + str(i) + ".csv")
mergeCsv(hardcodename, 16)
#transpose("result.csv")
| gpl-2.0 |
keiserlab/e3fp-paper | project/analysis/comparison/get_confusion_stats.py | 1 | 5389 | """Calculate performance stats for specific thresholds from confusion matrix.
Author: Seth Axen
E-mail: [email protected]
"""
from __future__ import print_function, division
import logging
import os
import glob
import cPickle as pkl
import sys
import numpy as np
from scipy.sparse import issparse
from python_utilities.scripting import setup_logging
from python_utilities.io_tools import smart_open
from sklearn.metrics import confusion_matrix, precision_recall_curve
def get_max_f1_thresh(y_true, y_score):
"""Get maximum F1-score and corresponding threshold."""
precision, recall, thresh = precision_recall_curve(y_true, y_score)
f1 = 2 * precision * recall / (precision + recall)
max_f1_ind = np.argmax(f1)
max_f1 = f1[max_f1_ind]
max_f1_thresh = thresh[max_f1_ind]
return max_f1, max_f1_thresh
def get_metrics_at_thresh(y_true, y_score, thresh):
"""Get sensitivity, specificity, precision and F1-score at threshold."""
y_pred = y_score >= thresh
confusion = confusion_matrix(y_true, y_pred)
tn, fp, fn, tp = confusion.ravel()
sensitivity = tp / (tp + fn) # recall
specificity = tn / (fp + tn)
precision = tp / (tp + fp)
f1 = 2 * precision * sensitivity / (precision + sensitivity)
return sensitivity, specificity, precision, f1
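# Minimal sanity-check sketch, not part of the analysis pipeline: with a toy,
# perfectly separating score the best threshold gives F1 = 1 and all four
# metrics at that threshold equal 1.
def _demo_threshold_metrics():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.2, 0.8, 0.9])
    max_f1, thresh = get_max_f1_thresh(y_true, y_score)
    return max_f1, get_metrics_at_thresh(y_true, y_score, thresh)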
def compute_fold_metrics(target_mol_array, mask_file, results_file,
thresh=None):
"""Compute metrics from fold at maximum F1-score or threshold."""
logging.info("Loading mask file.")
with smart_open(mask_file, "rb") as f:
train_test_mask = pkl.load(f)
test_mask = train_test_mask == 1
del train_test_mask
logging.info("Loading results from file.")
with np.load(results_file) as data:
results = data["results"]
logging.info("Computing metrics.")
y_true = target_mol_array[test_mask].ravel()
y_score = results[test_mask].ravel()
nan_inds = np.where(~np.isnan(y_score))
y_true, y_score = y_true[nan_inds], y_score[nan_inds]
del results, test_mask, target_mol_array
if thresh is None:
f1, thresh = get_max_f1_thresh(y_true, y_score)
pvalue = 10**(-thresh)
sensitivity, specificity, precision, f1 = get_metrics_at_thresh(y_true,
y_score,
thresh)
logging.debug(("P-value: {:.4g} Sensitivity: {:.4f} "
"Specificity: {:.4f} Precision: {:.4f} "
"F1: {:.4f}").format(pvalue, sensitivity, specificity,
precision, f1))
return (pvalue, sensitivity, specificity, precision, f1)
def compute_average_metrics(cv_dir, thresh=None):
"""Compute fold metrics averaged across fold."""
input_file = os.path.join(cv_dir, "inputs.pkl.bz2")
fold_dirs = glob.glob(os.path.join(cv_dir, "*/"))
logging.debug("Loading input files.")
with smart_open(input_file, "rb") as f:
(fp_array, mol_to_fp_inds, target_mol_array,
target_list, mol_list) = pkl.load(f)
del fp_array, mol_to_fp_inds, target_list, mol_list
if issparse(target_mol_array):
target_mol_array = target_mol_array.toarray().astype(np.bool)
fold_metrics = []
for fold_dir in sorted(fold_dirs):
mask_file = glob.glob(os.path.join(fold_dir, "*mask*"))[0]
results_file = glob.glob(os.path.join(fold_dir, "*result*"))[0]
fold_metric = compute_fold_metrics(target_mol_array, mask_file,
results_file, thresh=thresh)
fold_metrics.append(fold_metric)
fold_metrics = np.asarray(fold_metrics)
mean_metrics = fold_metrics.mean(axis=0)
std_metrics = fold_metrics.std(axis=0)
logging.debug(("P-value: {:.4g} +/- {:.4g} "
"Sensitivity: {:.4f} +/- {:.4f} "
"Specificity: {:.4f} +/- {:.4f} "
"Precision: {:.4f} +/- {:.4f} "
"F1: {:.4f} +/- {:.4f}").format(
mean_metrics[0], std_metrics[0],
mean_metrics[1], std_metrics[1],
mean_metrics[2], std_metrics[2],
mean_metrics[3], std_metrics[3],
mean_metrics[4], std_metrics[4]))
return mean_metrics
if __name__ == "__main__":
try:
e3fp_cv_dir, ecfp_cv_dir = sys.argv[1:3]
except ValueError:
sys.exit("Usage: python get_confusion_stats.py <e3fp_cv_dir> "
"<ecfp_cv_dir>")
setup_logging(verbose=True)
logging.info("Getting average metrics for E3FP")
metrics = compute_average_metrics(e3fp_cv_dir)
max_thresh = -np.log10(metrics[0])
logging.info("Getting average metrics for E3FP at p-value: {:.4g}".format(
metrics[0]))
compute_average_metrics(e3fp_cv_dir, thresh=max_thresh)
logging.info("Getting average metrics for ECFP4 at p-value: {:.4g}".format(
metrics[0]))
compute_average_metrics(ecfp_cv_dir, thresh=max_thresh)
logging.info("Getting average metrics for ECFP4")
metrics = compute_average_metrics(ecfp_cv_dir)
max_thresh = -np.log10(metrics[0])
logging.info("Getting average metrics for ECFP4 at p-value: {:.4g}".format(
metrics[0]))
compute_average_metrics(ecfp_cv_dir, thresh=max_thresh)
| lgpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/frame/test_apply.py | 3 | 24470 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
import warnings
import numpy as np
from pandas import (notnull, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, reduce=False)
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, reduce=True)
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast(self):
broadcasted = self.frame.apply(np.mean, broadcast=True)
agged = self.frame.apply(np.mean)
for col, ts in compat.iteritems(broadcasted):
assert (ts == agged[col]).all()
broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)
agged = self.frame.apply(np.mean, axis=1)
for idx in broadcasted.index:
assert (broadcasted.xs(idx) == agged[idx]).all()
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = self.mixed_frame._apply_standard(np.mean, 0,
ignore_failures=True)
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), broadcast=True)
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notnull(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = DataFrame(np.tile(self.frame.index,
(len(self.frame.columns), 1)).T,
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
def test_apply_multi_index(self):
s = DataFrame([[1, 2], [3, 4], [5, 6]])
s.index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s.columns = ['col1', 'col2']
res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)
assert isinstance(res.index, MultiIndex)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = pd.Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = pd.Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, reduce=True)
reduce_false = df.apply(fn, reduce=False)
reduce_none = df.apply(fn, reduce=None)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
# See gh-12244
def test_apply_non_numpy_dtype(self):
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def zip_frames(*frames):
"""
take a list of frames, zip the columns together for each
assume that these all have the first frame columns
return a new frame
"""
columns = frames[0].columns
zipped = [f[c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
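# Usage sketch (not one of the test cases): zipping a small frame with a scaled
# copy interleaves their columns in the first frame's column order.
def _demo_zip_frames():
    df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
    zipped = zip_frames(df, df * 10)
    return list(zipped.columns) # ['A', 'A', 'B', 'B']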
class TestDataFrameAggregate(TestData):
_multiprocess_can_split_ = True
def test_agg_transform(self):
with np.errstate(all='ignore'):
f_sqrt = np.sqrt(self.frame)
f_abs = np.abs(self.frame)
# ufunc
result = self.frame.transform(np.sqrt)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt])
expected = f_sqrt.copy()
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt])
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
expected = zip_frames(f_sqrt, f_abs)
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt', 'absolute']])
result = self.frame.apply([np.sqrt, np.abs])
assert_frame_equal(result, expected)
result = self.frame.transform(['sqrt', np.abs])
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'])
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']})
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self):
# all reducers
expected = zip_frames(self.frame.mean().to_frame(),
self.frame.max().to_frame(),
self.frame.sum().to_frame()).T
expected.index = ['mean', 'max', 'sum']
result = self.frame.agg(['mean', 'max', 'sum'])
assert_frame_equal(result, expected)
# dict input with scalars
result = self.frame.agg({'A': 'mean', 'B': 'sum'})
expected = Series([self.frame.A.mean(), self.frame.B.sum()],
index=['A', 'B'])
assert_series_equal(result.reindex_like(expected), expected)
# dict input with lists
result = self.frame.agg({'A': ['mean'], 'B': ['sum']})
expected = DataFrame({'A': Series([self.frame.A.mean()],
index=['mean']),
'B': Series([self.frame.B.sum()],
index=['sum'])})
assert_frame_equal(result.reindex_like(expected), expected)
# dict input with lists with multiple
result = self.frame.agg({'A': ['mean', 'sum'],
'B': ['sum', 'max']})
expected = DataFrame({'A': Series([self.frame.A.mean(),
self.frame.A.sum()],
index=['mean', 'sum']),
'B': Series([self.frame.B.sum(),
self.frame.B.max()],
index=['sum', 'max'])})
assert_frame_equal(result.reindex_like(expected), expected)
def test_nuiscance_columns(self):
# GH 15015
df = DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
result = df.agg('min')
expected = Series([1, 1., 'bar', pd.Timestamp('20130101')],
index=df.columns)
assert_series_equal(result, expected)
result = df.agg(['min'])
expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]],
index=['min'], columns=df.columns)
assert_frame_equal(result, expected)
result = df.agg('sum')
expected = Series([6, 6., 'foobarbaz'],
index=['A', 'B', 'C'])
assert_series_equal(result, expected)
result = df.agg(['sum'])
expected = DataFrame([[6, 6., 'foobarbaz']],
index=['sum'], columns=['A', 'B', 'C'])
assert_frame_equal(result, expected)
| mit |
aleung12/ast381 | PTMCMC.py | 1 | 44864 | import numpy as np
import random
import scipy.constants as const
import datetime
from jdcal import gcal2jd, jd2jcal, jcal2jd
import time
import multiprocessing as mp
import gc
def pt_aux(Tladder,nchain,swapInt,burnIn,models,param_list,param_ct,psmin,psmax,jhr,real_data,it):
verbose = False
output = mp.Queue()
proc = []
for wnum in range(nchain):
model = models[wnum]
T = Tladder[wnum]
proc.append(mp.Process(target=mcmc_fit, args=(swapInt,model,wnum,burnIn,verbose,param_list,param_ct,psmin,psmax,jhr,real_data,T,output,it)))
for p in proc: p.start()
for p in proc: p.join()
results = [output.get() for p in proc]
results.sort()
print(' ###### results for iteration '+str(it+1)+': ')
print('')
print(results)
print('')
last_posit = []
for wnum in range(nchain):
last_posit.append(results[wnum][1])
new_posit = propose_swaps(real_data,nchain,Tladder,last_posit)
return new_posit
def propose_swaps(real_data,nchain,temp_ladder,last_posit):
print(' ###### propose_swaps() reached at: '+time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))
models = []
for i in range(nchain): models.append([])
avail_temps = np.copy(temp_ladder)
avail_indices = np.arange(0,nchain)
swap_acc_rates = np.zeros(nchain)
for k in range(nchain/2):
swap_elements = [-1,-1]
while swap_elements[0] == swap_elements[1]:
swap_elements = np.random.randint(nchain-2*k,size=2)
beta_i = 1. /avail_temps[swap_elements[0]]
beta_j = 1. /avail_temps[swap_elements[1]]
chisq_sep_i, chisq_PA_i, chisq_RV_i = prob_data_given_model(real_data,last_posit[avail_indices[swap_elements[0]]])
chisq_sep_j, chisq_PA_j, chisq_RV_j = prob_data_given_model(real_data,last_posit[avail_indices[swap_elements[1]]])
arg = -sum(chisq_sep_i) +sum(chisq_sep_j) -sum(chisq_PA_i) +sum(chisq_PA_j) -chisq_RV_i +chisq_RV_j
if arg >= 0: swap_prob = 1.
else: swap_prob = exp(arg) **(beta_j -beta_i)
swap_prob = min(1,swap_prob)
if np.random.rand() < swap_prob: # accept swap
models[avail_indices[swap_elements[0]]] = last_posit[avail_indices[swap_elements[1]]]
models[avail_indices[swap_elements[1]]] = last_posit[avail_indices[swap_elements[0]]]
print(' ###### proposed swap accepted ')
print(' ###### between T = { %.0f , %.0f } '%(avail_temps[swap_elements[0]],avail_temps[swap_elements[1]]))
print(' ###### swap acceptance probability: %.3f'%swap_prob)
print(' ######')
else:
models[avail_indices[swap_elements[0]]] = last_posit[avail_indices[swap_elements[0]]]
models[avail_indices[swap_elements[1]]] = last_posit[avail_indices[swap_elements[1]]]
print(' ###### proposed swap rejected ')
print(' ###### between T = { %.0f , %.0f } '%(avail_temps[swap_elements[0]],avail_temps[swap_elements[1]]))
print(' ###### swap acceptance probability: %.3f'%swap_prob)
print(' ######')
swap_acc_rates[avail_indices[swap_elements[0]]] = swap_acc_rates[avail_indices[swap_elements[1]]] = swap_prob
avail_temps = np.delete(avail_temps,swap_elements)
avail_indices = np.delete(avail_indices,swap_elements)
#print(models)
### return list of length=nchain with parameter states post-swap
### return swap acceptance rate for each temperature
return models, swap_acc_rates
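# Worked sketch (not called by the sampler): the swap rule used above applied to
# two toy chi-squared sums. With the hot chain (T=10) holding the slightly worse
# model and the cold chain (T=1) the better one, the swap is accepted roughly
# 17% of the time. np.exp is used here to keep the sketch self-contained.
def _demo_swap_prob(chisq_i=12., chisq_j=10., T_i=10., T_j=1.):
    beta_i, beta_j = 1./T_i, 1./T_j
    arg = -chisq_i + chisq_j
    if arg >= 0: swap_prob = 1.
    else: swap_prob = np.exp(arg) **(beta_j -beta_i)
    return min(1, swap_prob)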
def pt_runs(maxit,w81):
t00 = time.time()
param_list, param_ct, nchain, swapInt, jhr, psmin, psmax, real_data, models, temp_ladder = init_all(w81)
swap_acc_rate = open('swap_acceptance_rate.dat','w')
swap_acc_rate.write('# iteration \t')
for i in range(len(temp_ladder)):
swap_acc_rate.write(' T = %.1f \t'%(temp_ladder[i]))
swap_acc_rate.write('\n')
for it in range(maxit):
t0,tl = time.time(),time.localtime()
print(' ###### iteration %.0f of pt_runs() began at: '%(it+1) +time.strftime("%a, %d %b %Y %H:%M:%S", tl))
#if (it==0): burnIn = True
#else: burnIn = False
burnIn = True # [2015-12-16] always require burn-in after swaps
if it == 0: swapInt = 2e4
else: swapInt = 1e4 #2e3
models, sarates = pt_aux(temp_ladder,nchain,swapInt,burnIn,models,param_list,param_ct,psmin,psmax,jhr,real_data,it)
swap_acc_rate.write(' %.0f \t'%(it))
for T in range(len(sarates)):
swap_acc_rate.write(' %.4f \t'%(sarates[T]))
swap_acc_rate.write('\n')
print(' ###### iteration %.0f of pt_runs() finished at: '%(it+1) +time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))
if (it%100)==0:
plot_mcorbits(1,it+1,100)
plot_proj_orbits(it+1,100)
T = 1
mchain = get_mcsample(1,it+1,T,'hist')
plot_posterior(mchain,it+1,T,True)
mchain = []
gc.collect()
swap_acc_rate.close()
def prob_accept(data,prev_model,cand_model,var,jhr,psmin,psmax,T):
prev_chisq_sep, prev_chisq_PA, prev_chisq_RV = prob_data_given_model(data,prev_model)
cand_chisq_sep, cand_chisq_PA, cand_chisq_RV = prob_data_given_model(data,cand_model)
arg = (sum(prev_chisq_sep) -sum(cand_chisq_sep) +sum(prev_chisq_PA) -sum(cand_chisq_PA) +prev_chisq_RV -cand_chisq_RV) /float(T)
#if var == 3:
# psmin[var] = -1.
# psmax[var] = 1.
# prev_model[var] = cos(prev_model[var])
# cand_model[var] = cos(cand_model[var])
if (not var==3):
# properly normalize proposal distribution (uniform) if jump range extends beyond low end of parameter space
if ((cand_model[var] -jhr[var]) < psmin[var]) or ((prev_model[var] -jhr[var]) < psmin[var]):
arg += ln( (2*jhr[var]) / min((2*jhr[var]),(cand_model[var]+jhr[var]-psmin[var])) )
arg -= ln( (2*jhr[var]) / min((2*jhr[var]),(prev_model[var]+jhr[var]-psmin[var])) )
# properly normalize proposal distribution (uniform) if jump range extends beyond high end of parameter space
elif ((cand_model[var] +jhr[var]) > psmax[var]) or ((prev_model[var] +jhr[var]) > psmax[var]):
arg += ln( (2*jhr[var]) / min((2*jhr[var]),(-cand_model[var]+jhr[var]+psmax[var])) )
arg -= ln( (2*jhr[var]) / min((2*jhr[var]),(-prev_model[var]+jhr[var]+psmax[var])) )
if arg >= 0: prob_acc = 1.
else: prob_acc = exp(arg)
return prob_acc
def prob_data_given_model(data,model): # likelihood to be expressed as function of chi-squared's
n = len(data[0])
JD,sep,sepunc,PA,PAunc = data[0],data[1],data[2],data[3],data[4]
smjr_ax,P,ecc,inclin,bOmega,omega,tP = model[0],model[1],model[2],model[3],model[4],model[5],model[6]
chisq_sep, chisq_PA = [],[]
for i in range(n):
modelsep, modelPA = absolute_astrometry(d,JD[i],smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'sep,PA')
chisq_sep.append( ((sep[i]-modelsep)/sepunc[i])**2 )
chisq_PA.append( ((PA[i]-modelPA)/PAunc[i])**2 )
# [2015-12-10]: one RV data point from Snellen et al. (2014)
modelRV = absolute_astrometry(d,convert_gcal('2013-12-17','jdd'),smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'RV')
chisq_RV = ((-15.4-modelRV)/1.7)**2
return chisq_sep, chisq_PA, chisq_RV
def julian_day(jdy):
return sum(jcal2jd(int(jdy),1,1+int(round((jdy%1)*365.25,0))))
def julian_year(jdd):
jcal = jd2jcal(jdd,0)
jdy = jcal[0] +(jcal[1]-1)/12. +(jcal[2]-1)/365.25
return round(jdy,3)
def convert_gcal(gcd,jdw):
gfmt = '%Y-%m-%d' ### Geogorian date format in data file
jdt = datetime.datetime.strptime(gcd,gfmt).timetuple()
jdd = sum(gcal2jd(jdt.tm_year,jdt.tm_mon,jdt.tm_mday))
if jdw == 'jdd': return jdd
elif jdw == 'jdy': return julian_year(jdd)
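# Round-trip sketch (not called by the sampler): convert an arbitrary calendar
# date to a Julian day number and then to a decimal Julian year.
def _demo_julian_helpers():
    jdd = convert_gcal('2013-12-17', 'jdd') # Julian day number
    return jdd, julian_year(jdd) # roughly 2013.96 in decimal years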
def init_all(w81):
param_list = ['semimajor axis','orbital period','eccentricity','inclination','big Omega','omega','t_periastron']
param_ct = len(param_list)
if w81 == False: sdata = 1 # not using 1981-11-10 event as data point
elif w81 == True: sdata = 2 # do use 1981-11-10 event as data point
nchain = 8
swapInt = 1e4
models = []
for i in range(nchain):
#models.append(initial_guesses(1))
models.append(initial_guesses(''))
#Tmin, Tmax = 1, 8
#temp_ladder = np.arange(Tmin,Tmax+1,1).tolist()
#logTmin,logTmax = 0,4 ### v3
#temp_ladder = (10**(np.arange(logTmin,logTmax,0.5))).tolist()
logTmin,logTmax = 0,5 ### v4
temp_ladder = (10**(np.arange(logTmin,logTmax,0.71428571))).tolist()
# jump half-ranges (uniform distribution)
# [2015-12-10]: switched to uniform in log(a) and cos(i)
# [2005-12-12]: switched back to uniform in (a)
jhr = [AU_to_cm(0.08), yrs_to_day(0.3), 1e-2, 5e-2, deg_to_rad(0.8), deg_to_rad(0.8), 21.]
# define edges of parameter space
psmin = [AU_to_cm(1), yrs_to_day(5), 1e-9, deg_to_rad(0), deg_to_rad(-180), deg_to_rad(-180), julian_day(1980)]
psmax = [AU_to_cm(25), yrs_to_day(60), 0.8, deg_to_rad(180), deg_to_rad(180), deg_to_rad(180), julian_day(2015)]
# get real data from file
real_data = get_data(sdata)
return param_list, param_ct, nchain, swapInt, jhr, psmin, psmax, real_data, models, temp_ladder
def initial_guesses(init):
if init == 1: ### MCMC result from Chauvin et al. (2012)
return [AU_to_cm(8.8), yrs_to_day(19.6), 0.021, deg_to_rad(88.5), deg_to_rad(-148.24), deg_to_rad(-115.0), julian_day(2006.3)]
elif init == 2: ### chi-squared minimization result from Chauvin et al. (2012)
return [AU_to_cm(11.2), yrs_to_day(28.3), 0.16, deg_to_rad(88.8), deg_to_rad(-147.73), deg_to_rad(4.0), julian_day(2013.3)]
else:
smjr_ax = AU_to_cm(random.uniform(5,15))
P = yrs_to_day(random.uniform(15,25))
ecc = random.uniform(0.001,0.1)
inclin = arccos(random.uniform(-0.2,0.2)) ### uniform in cos(i)
bOmega = deg_to_rad(random.uniform(-160,-140))
omega = deg_to_rad(random.uniform(-120,-100))
tP = random.uniform(julian_day(2005),julian_day(2010))
return [smjr_ax,P,ecc,inclin,bOmega,omega,tP]
def mcmc_fit(swapInt,model,wnum,burnIn,verbose,param_list,param_ct,psmin,psmax,jhr,real_data,T,output,it):
t0 = time.time()
init_model = np.copy(model)
acc_rate,mchain = [],[]
for i in range(param_ct):
acc_rate.append([])
mchain.append([])
tpsmin = np.zeros(param_ct) # modified parameter space edge to reflect uniform priors in cos(i)
tpsmax = np.zeros(param_ct)
for i in range(param_ct):
if i == 3: # prior for inclination is uniform in cos(i)
tpsmin[i] = (-1.)
tpsmax[i] = (1.)
else:
tpsmin[i] = (psmin[i])
tpsmax[i] = (psmax[i])
jump_ct = 0
for counter in range(int(swapInt)):
trial_val = -1e30 # temporary placeholder value
var = (counter%param_ct) # which parameter (Gibbs sampler)
while not (tpsmin[var] < trial_val < tpsmax[var]): # do until new value falls within parameter space
if var == 3: trial_val = cos(model[var]) +random.uniform(-jhr[var],jhr[var])
else: trial_val = model[var] +random.uniform(-jhr[var],jhr[var])
if var == 3: prop_val = arccos(trial_val)
else: prop_val = trial_val
new_model = np.append(model[:var],prop_val)
new_model = np.append(new_model,model[var+1:])
prob_acc = prob_accept(real_data,model,new_model,var,jhr,psmin,psmax,T)
if np.random.rand() < prob_acc:
model = new_model
jump_ct += 1
# [2015-12-16] burn-in reduced from 1e3 to 1e2 accepted jumps
# [2015-12-18] burn-in reverted back to 1e3 accepted jumps, with corresponding change to 1e4 iterations between swap proposals
if (jump_ct >= 1e3) or (burnIn == False):
acc_rate[var].append(prob_acc)
if (counter%1e2) == 0:
for i in range(param_ct):
if i==6: mchain[i].append(julian_year(model[i]))
else: mchain[i].append(model[i])
if ((counter%1e2)==0 and verbose) or ((counter%2e3)==0) or (counter<1e4 and (counter%1e3)==0):
print('walker '+str(wnum+1), '%6s'%jump_ct, '%6s'%counter, '%6s'%'%.3f'%prob_acc, '%6s'%'%.2f'%(cm_to_AU(model[0])),'%6s'%'%.2f'%(day_to_yrs(model[1])),'%6s'%'%.2f'%(model[2]),'%6s'%'%.2f'%(rad_to_deg(model[3])),'%6s'%'%.2f'%(rad_to_deg(model[4])),'%6s'%'%.2f'%(rad_to_deg(model[5])),'%6s'%'%.2f'%(julian_year(model[6])))
if True: #wnum == 0:
print('')
print('walker number: '+str(wnum+1))
print('semimajor axis : '+'median value = %6s'%'%.6f'%(cm_to_AU(np.median(mchain[0]))) \
+', initial value = %6s'%'%.6f'%(cm_to_AU(init_model[0])) \
+', acceptance rate = %.3f'%(np.mean(acc_rate[0])))
print('orbital period : '+'median value = %6s'%'%.6f'%(day_to_yrs(np.median(mchain[1]))) \
+', initial value = %6s'%'%.6f'%(day_to_yrs(init_model[1])) \
+', acceptance rate = %.3f'%(np.mean(acc_rate[1])))
print('eccentricity : '+'median value = %6s'%'%.6f'%(np.median(mchain[2])) \
+', initial value = %6s'%'%.6f'%(init_model[2]) \
+', acceptance rate = %.3f'%(np.mean(acc_rate[2])))
print('inclination : '+'median value = %6s'%'%.6f'%(rad_to_deg(np.median(mchain[3]))) \
+', initial value = %6s'%'%.6f'%(rad_to_deg(init_model[3])) \
+', acceptance rate = %.3f'%(np.mean(acc_rate[3])))
print('big Omega : '+'median value = %6s'%'%.6f'%(rad_to_deg(np.median(mchain[4]))) \
+', initial value = %6s'%'%.6f'%(rad_to_deg(init_model[4])) \
+', acceptance rate = %.3f'%(np.mean(acc_rate[4])))
print('omega : '+'median value = %6s'%'%.6f'%(rad_to_deg(np.median(mchain[5]))) \
+', initial value = %6s'%'%.6f'%(rad_to_deg(init_model[5])) \
+', acceptance rate = %.3f'%(np.mean(acc_rate[5])))
print('t_periastron : '+'median value = %6s'%'%.6f'%(np.median(mchain[6])) \
+', initial value = %6s'%'%.6f'%(julian_year(init_model[6])) \
+', acceptance rate = %.3f'%(np.mean(acc_rate[6])))
print('')
print('walker number: '+str(wnum+1))
print('time elapsed: %.1f seconds' % (time.time()-t0))
print('')
print('jump half-ranges: '+str(['%.3f AU'%(cm_to_AU(jhr[0])), '%.3f yrs'%(day_to_yrs(jhr[1])), '%.3f (eccentricity)'%(jhr[2]), '%.3e (cos i)'%(jhr[3]), '%.3f deg'%(rad_to_deg(jhr[4])), '%.3f deg'%(rad_to_deg(jhr[5])), '%.3f days'%(jhr[6])]))
print('')
print(' ###### saving results from (T = %.1f) chain at: '%(T)+time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))
results_list = []
results_list.append(cm_to_AU(np.array(mchain[0])))
results_list.append(day_to_yrs(np.array(mchain[1])))
results_list.append((np.array(mchain[2])))
results_list.append(rad_to_deg(np.array(mchain[3])))
results_list.append(rad_to_deg(np.array(mchain[4])))
results_list.append(rad_to_deg(np.array(mchain[5])))
results_list.append((np.array(mchain[6])))
accepted = open('accepted_orbits_logT%.1f'%(log10(T))+'_it%03d'%(it+1)+'.dat','w')
accepted.write('# \t')
for i in range(param_ct):
accepted.write(' '+param_list[i]+'\t')
accepted.write('\n')
for k in range(len(mchain[0])):
for i in range(param_ct):
accepted.write(' '+'%8s'%'%.5f'%(results_list[i][k])+'\t')
accepted.write('\n')
for i in range(len(mchain[6])):
mchain[6][i] = julian_day(mchain[6][i])
last_posit = model
avg_acc_rate = []
for i in range(param_ct):
avg_acc_rate.append('%.3f'%np.mean(acc_rate[i]))
output.put([wnum, last_posit, avg_acc_rate])
plot_posterior(results_list,it+1,T,False)
print(' ###### mcmc_fit() finished for walker '+str(wnum+1)+', iteration '+str(it+1)+' at: '+time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))
gc.collect()
def absolute_astrometry(d,t,smjr_ax,P,ecc,inclin,bOmega,omega,tP,verbose,output):
### mean motion (n)
n = 2 *pi() /P
### mean anomaly (M)
M = n *(t-tP) ### (t-tP) gives time since periapsis
'''
### solve Kepler's equation
### M = E -ecc *sin(E)
### use Newton-Raphson method
### f(E) = E -ecc*sin(E) -M(t)
### E_(n+1) = E_n - f(E_n) /f'(E_n)
### = E_n - (E_n -ecc*sin(E_n) -M(t)) /(1 -ecc*cos(E_n))
'''
### eccentric anomaly (E)
if ecc <= 0.8: E = M
else: E = pi()
counter = 0
tloop = time.time()
while np.abs(g(E,M,ecc)) > 1e-5:
E = E - (E -ecc*sin(E) -M) /(1 -ecc*cos(E))
counter += 1
if verbose and counter%10 == 0: print(counter,round(time.time()-tloop,5))
### true anomaly (f)
f = 2 * arctan(sqrt((1+ecc)/(1-ecc))*tan(E/2.))
### true separation
r = smjr_ax *(1 -ecc**2) /(1 +ecc *cos(f))
### absolute astrometry
X = r *(cos(bOmega)*cos(omega+f)-sin(bOmega)*sin(omega+f)*cos(inclin))
Y = r *(sin(bOmega)*cos(omega+f)+cos(bOmega)*sin(omega+f)*cos(inclin))
### Kepler's third law
M_sys = 4*pi()**2/G *smjr_ax**3 /P**2
### switch to center-of-mass frame
CM_X = X /M_sys #*m2/(m1 +m2)
CM_Y = Y /M_sys #*m2/(m1 +m2)
rawarctan = arctan((Y-CM_Y)/(X-CM_X))
if (Y-CM_Y)>=0 and (X-CM_X)>=0: PA_m2 = rawarctan # quadrant I
elif (X-CM_X)<0: PA_m2 = rawarctan +pi() # quadrants II and III
else: PA_m2 = rawarctan +2*pi() # quadrant IV
proj_sep = sqrt((X-CM_X)**2 +(Y-CM_Y)**2) # physical separation in cm
proj_sep = rad_to_deg(proj_sep/pc_to_cm(d)) *3600 *1000 # angular separation in mas
PA = rad_to_deg(PA_m2)
if PA >= 360: PA -= 360
### calculate RVs with respect to stationary barycenter (CM)
RV_m1 = m2 /M_sys *(sec_to_day(n) *smjr_ax *sin(inclin)) /sqrt(1 -ecc**2) *(cos(omega+f) +ecc*cos(omega))
RV_m2 = -RV_m1 *(M_sys-m2) /m2
#RV_m1 = m2 /(m1 +m2) *(sec_to_day(n) *smjr_ax *sin(inclin)) /sqrt(1 -ecc**2) *(cos(omega+f) +ecc*cos(omega))
#RV_m2 = -RV_m1 *m1 /m2
if output == 'sep,PA': return proj_sep, PA
elif output == 'RA,dec': return (rad_to_deg((Y-CM_Y)/pc_to_cm(d))*3600*1000),(rad_to_deg((X-CM_X)/pc_to_cm(d))*3600*1000),(RV_m1*1e-5),(RV_m2*1e-5)
elif output == 'RV': return (RV_m2*1e-5)
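# Editorial sketch (not part of the original pipeline): a standalone version of the
# Newton-Raphson solver for Kepler's equation that absolute_astrometry() runs inline,
# included only to make the iteration E_(n+1) = E_n - (E_n - e*sin(E_n) - M)/(1 - e*cos(E_n))
# easy to inspect in isolation. It assumes numpy is imported as np, as elsewhere in this script.
def _solve_kepler_example(M, ecc, tol=1e-5, max_iter=100):
    E = M if ecc <= 0.8 else np.pi              # same starting guess as absolute_astrometry()
    for _ in range(max_iter):
        resid = E - ecc*np.sin(E) - M           # g(E) = E - e*sin(E) - M
        if np.abs(resid) <= tol:
            break
        E = E - resid/(1. - ecc*np.cos(E))      # Newton-Raphson update
    return E
# e.g. _solve_kepler_example(0.5, 0.2) converges to E ~ 0.615 rad after a few iterations.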
def get_data(arg):
JD, sep, sepunc, PA, PAunc = [],[],[],[],[]
if arg == 1: data = open('data_bPicb.dat','r')
elif arg == 2: data = open('data_bPicb_w81.dat','r')
for line in data.readlines():
if not line.startswith('#'):
thisline = line.split()
gcal = str(thisline[0])
JD.append(convert_gcal(gcal,'jdd'))
sep.append(float(thisline[1]))
sepunc.append(float(thisline[2]))
PA.append(float(thisline[3]))
PAunc.append(float(thisline[4]))
data.close()
return JD, sep, sepunc, PA, PAunc
### function to be evaluated in Newton-Raphson method
def g(E,M,ecc): return E -ecc*sin(E) -M
### define constants in cgs units
h = const.h *1e7 ### erg s
c = const.c *1e2 ### cm s-1
k = const.k *1e7 ### erg K-1
G = const.G *1e3 ### cm3 g-1 s-2
AU = 1.496e13 ### cm
sigma = const.sigma *1e3 ### g s-3 K-4
R_Sun = 6.955e10 ### cm
M_Sun = 1.989e33 ### grams
M_Jup = 1.898e30 ### grams
M_Earth = 5.972e27 ### grams
### helper functions
def pi(): return np.pi
def ln(x): return np.log(x)
def exp(x): return np.exp(x)
def sqrt(x): return np.sqrt(x)
def sin(x): return np.sin(x)
def cos(x): return np.cos(x)
def tan(x): return np.tan(x)
def arcsin(x): return np.arcsin(x)
def arccos(x): return np.arccos(x)
def arctan(x): return np.arctan(x)
def arctan2(x,y): return np.arctan2(x,y)
def log10(x): return np.log10(x)
### unit conversions
def rad_to_deg(rad): return rad *180. /pi()
def deg_to_rad(deg): return deg /180. *pi()
def sec_to_yrs(sec): return sec /60. /60 /24 /365.256362
def yrs_to_sec(yrs): return yrs *60. *60 *24 *365.256362
def day_to_yrs(day): return day /365.256362
def yrs_to_day(yrs): return yrs *365.256362
def sec_to_day(sec): return sec /60. /60 /24
def day_to_sec(day): return day *24. *60 *60
def hrs_to_sec(hrs): return hrs *60. *60.
def cm_to_AU(cm): return cm *6.68458712e-14
def AU_to_cm(AU): return AU *1.496e13
def cm_to_Rsun(cm): return cm /R_Sun
def Rsun_to_cm(Rsun): return Rsun *R_Sun
def pc_to_cm(pc): return pc *3.086e18
def g_to_Mearth(g): return g /5.972e27
### beta Pic system
d = 19.3 # pc (Hipparcos; Crifo et al. 1997)
m1 = 1.61 *M_Sun # grams
m2 = 7.0 *M_Jup # grams
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator,ScalarFormatter,FormatStrFormatter,NullFormatter
from pylab import rcParams
rcParams['xtick.direction'] = 'in'
rcParams['ytick.direction'] = 'in'
from matplotlib import font_manager
from matplotlib import rc
def hist_post_at_temp(it,T):
mchain = get_mcsample(1,it+1,T,'hist')
plot_posterior(mchain,it+1,T,True)
mchain = []
gc.collect()
def plot_posterior(mchain,it,T,cu):
fontproperties = {'family':'serif','serif':['cmr'],'weight':'normal','size':11}
rc('text', usetex=True)
rc('font', **fontproperties)
plt.close()
fig = plt.figure(1,figsize=(7.5,10.5))
ax = [fig.add_subplot(521),fig.add_subplot(522),fig.add_subplot(523),fig.add_subplot(524),fig.add_subplot(525),fig.add_subplot(526),fig.add_subplot(527),fig.add_subplot(529)]
#fig = plt.figure(1,figsize=(8.0,9.0))
#ax = [fig.add_subplot(421),fig.add_subplot(422),fig.add_subplot(423),fig.add_subplot(424),fig.add_subplot(425),fig.add_subplot(426),fig.add_subplot(427),fig.add_subplot(428)]
### 8th subplot overcomes bug in matplotlib histogram (as of 2015-12-03)
### see http://stackoverflow.com/questions/29791119/extra-bar-in-the-first-bin-of-a-pyplot-histogram
### see also: /home/leung/coursework/ast381/term_project/code/hist_v16.pdf
###
#fig = plt.figure(1,figsize=(8.0,6.6))
#ax = [fig.add_subplot(321),fig.add_subplot(322),fig.add_subplot(323),fig.add_subplot(324),fig.add_subplot(325),fig.add_subplot(326)]
#majorFormatter = FormatStrFormatter('%.0f') #('%d')
#xmajorLocator = MultipleLocator(20)
#xminorLocator = MultipleLocator(5)
plot_list = mchain
xlabels = ['$a$ \ (AU)','$P$ \ (yr)','$e$','$i$ \ ($^{\circ}$)',r'$\Omega$ \ ($^{\circ}$)',r'$\omega$ \ ($^{\circ}$)','$t_{\mathrm{p}}$ \ [yr JD]']
majloc = [5, 10, 0.1, 5, 5, 60, 5]
minloc = [1, 2, 0.02, 1, 1, 10, 1]
majfmt = ['%.0f','%.0f','%.1f','%.0f','%.0f','%.0f','%.0f']
xmax = [23, 60, 0.8, 98, -138, 60, 2015]
xmin = [ 2, 10, 0, 82, -157, -180, 2000]
#majloc = [5, 5, 0.1, 30, 60, 60, 5]
#minloc = [1, 1, 0.02, 10, 20, 20, 1]
#majfmt = ['%.0f','%.0f','%.1f','%.0f','%.0f','%.0f','%.0f']
#xmax = [25, 60, 0.8, 180, 180, 180, 2015]
#xmin = [ 0, 5, 0, 0, -180, -180, 1995]
for j in range(len(ax)-1):
if j==3: tbins = 1200
elif j==4: tbins = 1600
else: tbins = 200
ax[j].hist(plot_list[j],bins=tbins,color='green',histtype='step',alpha=0.8,lw=1.6)
ax[j].set_xlabel(xlabels[j])
ax[j].set_ylabel('$N_{\mathrm{orbits}}$')
ax[j].set_xlim([xmin[j],xmax[j]])
ax[j].xaxis.set_major_locator(MultipleLocator(majloc[j]))
ax[j].xaxis.set_minor_locator(MultipleLocator(minloc[j]))
ax[j].xaxis.set_major_formatter(FormatStrFormatter(majfmt[j]))
ax[j].spines['left'].set_linewidth(0.45)
ax[j].spines['right'].set_linewidth(0.45)
ax[j].spines['top'].set_linewidth(0.45)
ax[j].spines['bottom'].set_linewidth(0.25)
if cu:
a = plt.hist(plot_list[j],bins=200)
if j==0: print('most probable values: ')
print(float(a[1][np.argmax(a[0])]))
#if j==6: print(a[1])
print('')
fig.tight_layout()
if cu:
plt.savefig('hist_all_logT%.1f'%(log10(T))+'_it%03d'%(it)+'.pdf')
#plt.savefig('hist_all.pdf')
else:
plt.savefig('hist_logT%.1f'%(log10(T))+'_it%03d'%(it)+'.pdf')
#plt.savefig('hist.pdf')
plt.close()
def plot_position(param,ver):
fontproperties = {'family':'serif','serif':['cmr'],'weight':'normal','size':14}
rc('text', usetex=True)
rc('font', **fontproperties)
plt.close()
fig = plt.figure(1,figsize=(6.0,7.0))
ax = [fig.add_subplot(211),fig.add_subplot(212)]
JD_data, sep, sep_unc, PA, PA_unc = get_data(1)
RA_data,dec_data,jdy_data,error_bar = [],[],[],[]
for i in range(len(JD_data)):
jdy_data.append(julian_year(JD_data[i]))
RA_data.append(sep[i]*sin(deg_to_rad(PA[i])))
dec_data.append(sep[i]*cos(deg_to_rad(PA[i])))
total_sqerr = (sep[i]*deg_to_rad(PA_unc[i]))**2 +(sep_unc[i])**2
err_bar = sqrt(0.5*total_sqerr)
error_bar.append(err_bar)
RV_data = -15.4
RV_err = 1.7
data_list = [[RA_data,dec_data],[RV_data]]
label_list = [[r'$\Delta \alpha$ (mas)',r'$\Delta \delta$ (mas)'],'Radial velocity \ [km s$^{-1}$]']
color_list = [['green','orange'],['blue']]
smjr_ax,P,ecc,inclin,bOmega,omega,tP = AU_to_cm(param[0]),yrs_to_day(param[1]),param[2],deg_to_rad(param[3]),deg_to_rad(param[4]),deg_to_rad(param[5]),julian_day(param[6])
JD = np.arange(julian_day(1980),julian_day(2020),7)
RA_list,dec_list,jdy,RV_list = [],[],[],[]
for i in range(len(JD)):
RA, dec, RV_m1, RV_m2 = absolute_astrometry(d,JD[i],smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'RA,dec')
RA_list.append(RA)
dec_list.append(dec)
RV_list.append(RV_m2)
jdy.append(julian_year(JD[i]))
plot_list = [[RA_list,dec_list],[RV_list]]
for j in range(len(plot_list)):
for k in range(len(plot_list[j])):
if j==0:
ax[j].plot(jdy,plot_list[j][k],color=color_list[j][k],marker='',ls='-',ms=1,lw=1.2,alpha=0.6,label=label_list[j][k])
else:
ax[j].plot(jdy,plot_list[j][k],color=color_list[j][k],marker='',ls='-',ms=1,lw=1.2,alpha=0.6,label='RV (km s$^{-1}$)')
ax[j].plot([1980,2020],[0,0],'k-',ls='dashed',lw=0.2)
if j==0:
gcal = '1981-11-10'
jd81 = convert_gcal(gcal,'jdy')
ax[j].scatter([jd81],[0],marker='x',edgecolor='blue',lw=2,s=36)
for k in range(len(plot_list[j])):
ax[j].errorbar(jdy_data,data_list[j][k],yerr=[error_bar,error_bar],fmt='none',alpha=0.6,lw=0.6,ecolor='red',capthick=0.6)
ax[j].scatter(jdy_data,data_list[j][k],marker='o',edgecolor='red',c='red',alpha=0.8,lw=0.2,s=6)
else:
gcal = '2013-12-17'
jd13 = convert_gcal(gcal,'jdy')
ax[j].scatter([jd13],[RV_data],marker='o',edgecolor='red',c='red',alpha=0.8,lw=0.5,s=12)
ax[j].errorbar([jd13],[RV_data],yerr=RV_err,fmt='none',alpha=0.6,lw=0.6,ecolor='red',capthick=0.6)
ax[j].set_xlabel('JD \ [year]')
if j == 0: ax[j].set_ylabel('Angular separation \ [mas]')
elif j == 1: ax[j].set_ylabel(label_list[j])
ax[j].set_xlim([1980,2020])
ax[j].xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax[j].xaxis.set_major_locator(MultipleLocator(5))
ax[j].xaxis.set_minor_locator(MultipleLocator(1))
ax[j].spines['left'].set_linewidth(0.9)
ax[j].spines['right'].set_linewidth(0.9)
ax[j].spines['top'].set_linewidth(0.9)
ax[j].spines['bottom'].set_linewidth(0.9)
ld = ax[j].legend(loc='upper right',shadow=False,labelspacing=0.25,borderpad=0.12)
frame = ld.get_frame()
frame.set_lw(0)
for label in ld.get_texts(): label.set_fontsize('medium')
for label in ld.get_lines(): label.set_linewidth(1.2)
fig.tight_layout()
plt.savefig('orbit_v'+str(ver)+'.pdf')
plt.savefig('orbit.pdf')
plt.close()
def plot_all(mparam,ver):
t0 = time.time()
fontproperties = {'family':'serif','serif':['cmr'],'weight':'normal','size':14}
rc('text', usetex=True)
rc('font', **fontproperties)
plt.close()
fig = plt.figure(1,figsize=(6.0,7.0))
ax = [fig.add_subplot(211),fig.add_subplot(212)]
JD_data, sep, sep_unc, PA, PA_unc = get_data(1)
RA_data,dec_data,jdy_data,error_bar = [],[],[],[]
for i in range(len(JD_data)):
jdy_data.append(julian_year(JD_data[i]))
RA_data.append(sep[i]*sin(deg_to_rad(PA[i])))
dec_data.append(sep[i]*cos(deg_to_rad(PA[i])))
total_sqerr = (sep[i]*deg_to_rad(PA_unc[i]))**2 +(sep_unc[i])**2
err_bar = sqrt(0.5*total_sqerr)
error_bar.append(err_bar)
RV_data = -15.4
RV_err = 1.7
data_list = [[RA_data,dec_data],[RV_data]]
label_list = [[r'$\Delta \alpha$ (mas)',r'$\Delta \delta$ (mas)'],'Radial velocity \ [km s$^{-1}$]']
color_list = [['green','orange'],['blue']]
if not (type(ver) == int):
print('about to plot %.0f orbits' %len(mparam))
this = 0.26
else: this = 0.48
# plot orbits
for p in range(len(mparam)):
if (p <= 10) or ((p%10) == 0): print(p, '%.1f'%(time.time()-t0))
param = mparam[p]
smjr_ax,P,ecc,inclin,bOmega,omega,tP = AU_to_cm(param[0]),yrs_to_day(param[1]),param[2],deg_to_rad(param[3]),deg_to_rad(param[4]),deg_to_rad(param[5]),julian_day(param[6])
JD = np.arange(julian_day(1980),julian_day(2025),7)
RA_list,dec_list,jdy,RV_list = [],[],[],[]
for i in range(len(JD)):
RA, dec, RV_m1, RV_m2 = absolute_astrometry(d,JD[i],smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'RA,dec')
RA_list.append(RA)
dec_list.append(dec)
RV_list.append(RV_m2)
jdy.append(julian_year(JD[i]))
plot_list = [[RA_list,dec_list],[RV_list]]
for j in range(len(plot_list)):
for k in range(len(plot_list[j])):
if j==0:
if p==0: ax[j].plot(jdy,plot_list[j][k],color=color_list[j][k],marker='',ls='-',ms=1,lw=this,alpha=this,label=label_list[j][k])
else: ax[j].plot(jdy,plot_list[j][k],color=color_list[j][k],marker='',ls='-',ms=1,lw=this,alpha=this)
else:
if p==0: ax[j].plot(jdy,plot_list[j][k],color=color_list[j][k],marker='',ls='-',ms=1,lw=this,alpha=this,label='RV (km s$^{-1}$)')
else: ax[j].plot(jdy,plot_list[j][k],color=color_list[j][k],marker='',ls='-',ms=1,lw=this,alpha=this)
ax[j].plot([1980,2025],[0,0],'k-',ls='dashed',lw=0.2,color='gray')
# plot data points
for j in range(len(plot_list)):
if j==0:
gcal = '1981-11-10'
jd81 = convert_gcal(gcal,'jdy')
ax[j].scatter([jd81],[0],marker='x',edgecolor='blue',lw=2.6,s=36)
for k in range(len(plot_list[j])):
ax[j].scatter(jdy_data,data_list[j][k],marker='o',edgecolor='red',c='red',lw=0.8,s=3)
ax[j].errorbar(jdy_data,data_list[j][k],yerr=[error_bar,error_bar],fmt='none',alpha=0.6,lw=0.6,ecolor='red',capsize=1.2,capthick=0.6)
else:
gcal = '2013-12-17'
jd13 = convert_gcal(gcal,'jdy')
ax[j].scatter([jd13],[RV_data],marker='o',edgecolor='red',c='red',lw=0.8,s=12)
ax[j].errorbar([jd13],[RV_data],yerr=RV_err,fmt='none',alpha=0.6,lw=0.6,ecolor='red',capsize=1.2,capthick=0.6)
ax[j].set_xlabel('JD \ [year]')
if j == 0: ax[j].set_ylabel('Angular separation \ [mas]')
elif j == 1: ax[j].set_ylabel(label_list[j])
ax[j].set_xlim([1980,2025])
if j == 0: ax[j].set_ylim([-600,800])
elif j == 1: ax[j].set_ylim([-30,30])
ax[j].xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax[j].xaxis.set_major_locator(MultipleLocator(5))
ax[j].xaxis.set_minor_locator(MultipleLocator(1))
ax[j].spines['left'].set_linewidth(0.9)
ax[j].spines['right'].set_linewidth(0.9)
ax[j].spines['top'].set_linewidth(0.9)
ax[j].spines['bottom'].set_linewidth(0.9)
ld = ax[j].legend(loc='upper right',shadow=False,labelspacing=0.1,borderpad=0.12)
ld.get_frame().set_lw(0)
ld.get_frame().set_alpha(0.0)
for label in ld.get_texts(): label.set_fontsize('small')
for label in ld.get_lines(): label.set_linewidth(1)
fig.tight_layout()
plt.savefig('many_orbits_it'+str(ver)+'.pdf')
plt.savefig('many_orbits.pdf')
plt.close()
def get_mcsample(start,end,T,whatfor):
param_list = ['semimajor axis','orbital period','eccentricity','inclination','big Omega','omega','t_periastron']
param_ct = len(param_list)
params = []
if (not whatfor == 'plot'):
for i in range(param_ct): params.append([])
for v in range(start,end+1):
data = open('accepted_orbits_logT%.1f'%(log10(T))+'_it%03d'%(v)+'.dat','r')
for line in data.readlines():
if not line.startswith('#'):
thisline = line.split()
for i in range(param_ct):
params[i].append(float(thisline[i]))
data.close()
if whatfor == 'hist':
return params
elif whatfor == 'stat':
import scipy.stats as ss
conf_int = []
for i in range(param_ct):
median = ss.scoreatpercentile(params[i],50)
msigma = ss.scoreatpercentile(params[i],16) -median
psigma = ss.scoreatpercentile(params[i],84) -median
conf_int.append([param_list[i],'%6s'%'%.3f'%median,'%6s'%'%.3f'%msigma,'%6s'%'+%.3f'%psigma])
print(conf_int[i])
#return conf_int
elif whatfor == 'plot':
for v in range(start,end+1):
data = open('accepted_orbits_logT%.1f'%(log10(T))+'_it%03d'%(v)+'.dat','r')
for line in data.readlines():
if not line.startswith('#'):
thisline = line.split()
floatparam = []
for i in range(param_ct):
floatparam.append(float(thisline[i]))
params.append(floatparam)
return params
def prob_81nov(maxit):
t0 = time.time()
params = get_mcsample(1,maxit,1,'plot')
chdate = convert_gcal('1981-11-10','jdd')
ang_size = 0.84 # angular size of beta Pic in mas (Kervella et al. 2004)
transit_ct = 0
for i in range(len(params)):
param = params[i]
smjr_ax,P,ecc,inclin,bOmega,omega,tP = AU_to_cm(param[0]),yrs_to_day(param[1]),param[2],deg_to_rad(param[3]),deg_to_rad(param[4]),deg_to_rad(param[5]),julian_day(param[6])
modelsep, modelPA = absolute_astrometry(d,chdate,smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'sep,PA')
if (modelsep <= 0.5*ang_size): transit_ct += 1
if (i%1000) == 0: print(str(i)+' of '+str(len(params)), '%.1f seconds'%(time.time()-t0))
print(transit_ct/float(len(params)))
def upcoming_transit(maxit):
param_list = ['semimajor axis','orbital period','eccentricity','inclination','big Omega','omega','t_periastron']
param_ct = len(param_list)
transits = open('upcoming_transit.dat','w')
transits.write('# \t')
transits.write(' transit date (yr JD) \t transit date (JD) \t angular separation \t position angle \t')
for i in range(param_ct):
transits.write(' '+param_list[i]+'\t')
transits.write('\n')
params = get_mcsample(1,maxit,1,'plot')
for i in range(len(params)):
if (i%10)==0: print(' checking '+str(i)+' out of '+str(len(params))+' random orbits; current time is '+time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()))
param = params[i]
smjr_ax,P,ecc,inclin,bOmega,omega,tP = AU_to_cm(param[0]),yrs_to_day(param[1]),param[2],deg_to_rad(param[3]),deg_to_rad(param[4]),deg_to_rad(param[5]),julian_day(param[6])
dates = np.arange(julian_day(2016.5),julian_day(2019.5),0.5)
for j in range(len(dates)):
proj_sep, PA = absolute_astrometry(d,dates[j],smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'sep,PA')
if proj_sep <= 0.835/2:
transits.write(' %.3f \t %.1f \t %.1f \t %.1f '%(julian_year(dates[j]),dates[j],proj_sep,PA))
for k in range(len(param)):
transits.write(' %.3f'%(param[k])+'\t')
transits.write('\n')
transits.close()
import scipy.stats as ss
def plot_proj_orbits(endit,howmany):
fontproperties = {'family':'serif','serif':['cmr'],'weight':'normal','size':12}
rc('text', usetex=True)
rc('font', **fontproperties)
plt.close()
fig = plt.figure(1,figsize=(7.5,3.7))
oax = fig.add_subplot(111)
oax.spines['top'].set_color('none')
oax.spines['left'].set_color('none')
oax.spines['right'].set_color('none')
oax.spines['bottom'].set_color('none')
oax.tick_params(labelcolor='none',top='off',bottom='off',left='off',right='off')
params = get_mcsample(1,endit,1,'hist')
param_ct = len(params)
most_probable, median = [],[]
for j in range(param_ct):
if j==3: tbins = 1200
elif j==4: tbins = 1600
else: tbins = 200
a = plt.hist(params[j],bins=tbins)
most_probable.append(float(a[1][np.argmax(a[0])]))
median.append(ss.scoreatpercentile(params[j],50))
ax = [fig.add_subplot(121),fig.add_subplot(122)]
ang_size = 0.835 # angular size of beta Pic in mas (Kervella et al. 2004)
betaPic = plt.Circle((0,0),0.5*ang_size,color='orange',alpha=0.6,lw=0)
smjr_ax,P,ecc,inclin,bOmega,omega,tP = AU_to_cm(most_probable[0]),yrs_to_day(most_probable[1]),most_probable[2],deg_to_rad(most_probable[3]),deg_to_rad(most_probable[4]),deg_to_rad(most_probable[5]),julian_day(most_probable[6])
dates = np.arange(tP,tP+P,P/1000.)
RA_off, dec_off = [],[]
for j in range(len(dates)):
RA, dec, RV_m1, RV_m2 = absolute_astrometry(d,dates[j],smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'RA,dec')
RA_off.append(RA)
dec_off.append(dec)
for k in range(len(ax)):
ax[k].plot(RA_off,dec_off,color='green',marker='',ls='-',ms=1,lw=0.6,alpha=0.85,label='Most probable')
smjr_ax,P,ecc,inclin,bOmega,omega,tP = AU_to_cm(median[0]),yrs_to_day(median[1]),median[2],deg_to_rad(median[3]),deg_to_rad(median[4]),deg_to_rad(median[5]),julian_day(median[6])
dates = np.arange(tP,tP+P,P/1000.)
RA_off, dec_off = [],[]
for j in range(len(dates)):
RA, dec, RV_m1, RV_m2 = absolute_astrometry(d,dates[j],smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'RA,dec')
RA_off.append(RA)
dec_off.append(dec)
for k in range(len(ax)):
ax[k].plot(RA_off,dec_off,color='magenta',marker='',ls='-',ms=1,lw=0.55,alpha=0.65,label='Median')
params = get_mcsample(1,endit,1,'plot')
selected = []
for i in range(howmany):
rsample = int(random.uniform(0,len(params)))
selected.append(params[rsample])
for i in range(len(selected)):
param = selected[i]
smjr_ax,P,ecc,inclin,bOmega,omega,tP = AU_to_cm(param[0]),yrs_to_day(param[1]),param[2],deg_to_rad(param[3]),deg_to_rad(param[4]),deg_to_rad(param[5]),julian_day(param[6])
dates = np.arange(tP,tP+P,P/1000.)
RA_off, dec_off = [],[]
for j in range(len(dates)):
RA, dec, RV_m1, RV_m2 = absolute_astrometry(d,dates[j],smjr_ax,P,ecc,inclin,bOmega,omega,tP,False,'RA,dec')
RA_off.append(RA)
dec_off.append(dec)
#for k in range(len(ax)):
ax[1].plot(RA_off,dec_off,color='blue',marker='',ls='-',ms=1,lw=0.12,alpha=0.16)
JD_data, sep, sep_unc, PA, PA_unc = get_data(1)
RA_data,dec_data,jdy_data,error_bar = [],[],[],[]
for i in range(len(JD_data)):
jdy_data.append(julian_year(JD_data[i]))
RA_data.append(sep[i]*sin(deg_to_rad(PA[i])))
dec_data.append(sep[i]*cos(deg_to_rad(PA[i])))
total_sqerr = (sep[i]*deg_to_rad(PA_unc[i]))**2 +(sep_unc[i])**2
err_bar = sqrt(0.5*total_sqerr)
error_bar.append(err_bar)
ax[0].errorbar(RA_data,dec_data,xerr=error_bar,yerr=error_bar,fmt='none',alpha=0.6,lw=0.25,ecolor='red',capsize=0.6,capthick=0.2)
ax[0].scatter(RA_data,dec_data,marker='o',edgecolor='red',c='red',lw=0.2,s=0.8,alpha=0.6)
oax.set_xlabel(r'$\Delta \alpha$ \ (mas)',fontsize='large', fontweight='bold')
oax.set_ylabel(r'$\Delta \delta$ \ (mas)',fontsize='large', fontweight='bold')
oax.xaxis.labelpad = 12
oax.yaxis.labelpad = 16
#ax.grid()
axpar = [[600,-600],[16,-16]]
aypar = [[-600,600],[-16,16]]
axmaj = [200,4]
axmnr = [ 50,1]
#ax[0].scatter([0],[0],marker='+',edgecolor='orange',lw=2,s=60)
ld = ax[0].legend(loc='upper right',shadow=False,labelspacing=0.1,borderpad=0.12)
ld.get_frame().set_lw(0)
ld.get_frame().set_alpha(0.0)
for label in ld.get_texts(): label.set_fontsize('x-small')
for label in ld.get_lines(): label.set_linewidth(1)
for k in range(len(ax)):
ax[k].plot([axpar[k][0],axpar[k][1]],[0,0],'k-',ls=':',lw=0.36,color='gray')
ax[k].plot([0,0],[aypar[k][0],aypar[k][1]],'k-',ls=':',lw=0.36,color='gray')
ax[k].set_xlim([axpar[k][0],axpar[k][1]])
ax[k].set_ylim([aypar[k][0],aypar[k][1]])
ax[k].xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax[k].yaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax[k].xaxis.set_major_locator(MultipleLocator(axmaj[k]))
ax[k].xaxis.set_minor_locator(MultipleLocator(axmnr[k]))
ax[k].yaxis.set_major_locator(MultipleLocator(axmaj[k]))
ax[k].yaxis.set_minor_locator(MultipleLocator(axmnr[k]))
ax[k].spines['left'].set_linewidth(0.5)
ax[k].spines['right'].set_linewidth(0.5)
ax[k].spines['top'].set_linewidth(0.5)
ax[k].spines['bottom'].set_linewidth(0.5)
ax[1].add_patch(betaPic)
fig.tight_layout()
plt.savefig('projected_orbits_it1-'+str(endit)+'.pdf')
plt.savefig('projected_orbits.pdf')
plt.close()
def plot_mcorbits(start,end,howmany):
t0 = time.time()
params = get_mcsample(start,end,1,'plot')
ssize = len(params)
selected = []
for i in range(howmany):
rsample = int(random.uniform(0,ssize))
selected.append(params[rsample])
print('start plotting')
print(len(params),len(selected))
plot_all(selected,str(start)+'-'+str(end))
print('done plotting, took %.1f seconds' %((time.time()-t0)))
params,selected = [],[]
gc.collect()
def get_params(arg):
params = [[],[],[],[],[],[],[]]
if arg == '': data = open('fitted_orbital_parameters.dat','r')
elif arg == 'v1': data = open('fitted_orbital_parameters_2e5steps.dat','r')
for line in data.readlines():
if not line.startswith('#'):
thisline = line.split()
for i in range(len(params)):
params[i].append(float(thisline[i]))
data.close()
print('median parameter values: ')
print(' semimajor axis : '+'median value = %6s'%'%.3f'%(np.median(params[0]))+' AU')
print(' orbital period : '+'median value = %6s'%'%.3f'%(np.median(params[1]))+' yrs')
print(' eccentricity : '+'median value = %6s'%'%.3f'%(np.median(params[2])))
print(' inclination : '+'median value = %6s'%'%.3f'%(np.median(params[3]))+' deg')
print(' big Omega : '+'median value = %6s'%'%.3f'%(np.median(params[4]))+' deg')
print(' omega : '+'median value = %6s'%'%.3f'%(np.median(params[5]))+' deg')
print(' t_periastron : '+'median value = %6s'%'%.3f'%(np.median(params[6]))+' yr JD')
def check_swap_rates():
rates = [[],[],[],[],[],[],[],[]]
data = open('swap_acceptance_rate.dat','r')
for line in data.readlines():
if not line.startswith('#'):
thisline = line.split()
for i in range(1,9):
rates[i-1].append(float(thisline[i]))
data.close()
for i in range(len(rates)):
print(np.mean(rates[i]))
if __name__ == '__main__':
a = input('Enter \'hist\' or \'orb\' or \'stat\' or \'run\': ')
temp_ladder = (10**(np.arange(0,5,0.71428571))).tolist()
if a == 'run':
w81 = input('Fit 1981-11-10 event as data point? (Enter boolean) ')
t0 = time.time()
maxit = 999
pt_runs(maxit+1,w81)
plot_mcorbits(1,maxit,100)
plot_proj_orbits(maxit,100)
for T in temp_ladder: hist_post_at_temp(maxit,T)
prob_81nov(maxit)
upcoming_transit(maxit)
check_swap_rates()
tt0 = time.time()-t0
print('time elapsed: %.1f hours, %.1f minutes, %.1f seconds' % ((tt0/3600.),((tt0%3600)/60.),((tt0%60))))
else:
lit = input('Enter last iteration: ')
if a == 'hist':
for T in temp_ladder:
hist_post_at_temp(lit-1,T)
elif a == 'orb':
plot_mcorbits(1,lit-1,100)
plot_proj_orbits(lit,100)
elif a == 'stat':
get_mcsample(1,lit-1,1,a)
| lgpl-3.0 |
ECP-CANDLE/Benchmarks | Pilot3/P3B3/p3b3_baseline_keras2.py | 1 | 5635 | from __future__ import print_function
import numpy as np
from keras import backend as K
'''
from keras.layers import Input, Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.models import Model
from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau
from sklearn.metrics import f1_score
'''
import os, sys, gzip
import keras
import math
from keras.layers.core import Dense, Dropout
from keras import optimizers
from keras.layers import Input
from keras.models import Model
import keras_mt_shared_cnn
import argparse
import p3b3 as bmk
import candle
def initialize_parameters(default_model = 'p3b3_default_model.txt'):
# Build benchmark object
p3b3Bmk = bmk.BenchmarkP3B3(bmk.file_path, default_model, 'keras',
prog='p3b3_baseline', desc='Multi-task CNN for data extraction from clinical reports - Pilot 3 Benchmark 3')
# Initialize parameters
gParameters = candle.finalize_parameters(p3b3Bmk)
#bmk.logger.info('Params: {}'.format(gParameters))
return gParameters
def fetch_data(gParameters):
""" Downloads and decompresses the data if not locally available.
Since the training data depends on the model definition it is not loaded,
instead the local path where the raw data resides is returned
"""
path = gParameters['data_url']
fpath = candle.fetch_file(path + gParameters['train_data'], 'Pilot3', untar=True)
return fpath
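# Editorial example (added for illustration; the variable names here are hypothetical, but the
# call sequence and file name mirror what run() below actually does): the returned path is the
# directory holding the pre-split numpy arrays, e.g.
#     params = initialize_parameters()
#     data_dir = fetch_data(params)
#     train_x = np.load(data_dir + '/train_X.npy')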
def run_cnn( GP, train_x, train_y, test_x, test_y,
learning_rate = 0.01,
batch_size = 10,
epochs = 10,
dropout = 0.5,
optimizer = 'adam',
wv_len = 300,
filter_sizes = [3,4,5],
num_filters = [300,300,300],
emb_l2 = 0.001,
w_l2 = 0.01
):
max_vocab = np.max( train_x )
max_vocab2 = np.max( test_x )
if max_vocab2 > max_vocab:
max_vocab = max_vocab2
wv_mat = np.random.randn( max_vocab + 1, wv_len ).astype( 'float32' ) * 0.1
num_classes = []
num_classes.append( np.max( train_y[ :, 0 ] ) + 1 )
num_classes.append( np.max( train_y[ :, 1 ] ) + 1 )
num_classes.append( np.max( train_y[ :, 2 ] ) + 1 )
num_classes.append( np.max( train_y[ :, 3 ] ) + 1 )
kerasDefaults = candle.keras_default_config()
optimizer_run = candle.build_optimizer( optimizer, learning_rate, kerasDefaults )
cnn = keras_mt_shared_cnn.init_export_network(
num_classes= num_classes,
in_seq_len= 1500,
vocab_size= len( wv_mat ),
wv_space= wv_len,
filter_sizes= filter_sizes,
num_filters= num_filters,
concat_dropout_prob = dropout,
emb_l2= emb_l2,
w_l2= w_l2,
optimizer= optimizer_run )
print( cnn.summary() )
validation_data = ( { 'Input': test_x },
{ 'Dense0': test_y[ :, 0 ],
'Dense1': test_y[ :, 1 ],
'Dense2': test_y[ :, 2 ],
'Dense3': test_y[ :, 3 ] } )
# candleRemoteMonitor = CandleRemoteMonitor(params= GP)
# timeoutMonitor = TerminateOnTimeOut(TIMEOUT)
candleRemoteMonitor = candle.CandleRemoteMonitor( params= GP )
timeoutMonitor = candle.TerminateOnTimeOut( GP[ 'timeout' ] )
history = cnn.fit(
x= np.array( train_x ),
y= [ np.array( train_y[ :, 0 ] ),
np.array( train_y[ :, 1 ] ),
np.array( train_y[ :, 2 ] ),
np.array( train_y[ :, 3 ] ) ],
batch_size= batch_size,
epochs= epochs,
verbose= 2,
validation_data= validation_data,
callbacks = [candleRemoteMonitor, timeoutMonitor]
)
return history
def run(gParameters):
fpath = fetch_data(gParameters)
# Get default parameters for initialization and optimizer functions
kerasDefaults = candle.keras_default_config()
learning_rate = gParameters[ 'learning_rate' ]
batch_size = gParameters[ 'batch_size' ]
epochs = gParameters[ 'epochs' ]
dropout = gParameters[ 'dropout' ]
optimizer = gParameters[ 'optimizer' ]
wv_len = gParameters[ 'wv_len' ]
filter_sizes = gParameters[ 'filter_sizes' ]
filter_sets = gParameters[ 'filter_sets' ]
num_filters = gParameters[ 'num_filters' ]
emb_l2 = gParameters[ 'emb_l2' ]
w_l2 = gParameters[ 'w_l2' ]
train_x = np.load( fpath + '/train_X.npy' )
train_y = np.load( fpath + '/train_Y.npy' )
test_x = np.load( fpath + '/test_X.npy' )
test_y = np.load( fpath + '/test_Y.npy' )
for task in range( len( train_y[ 0, : ] ) ):
cat = np.unique( train_y[ :, task ] )
train_y[ :, task ] = [ np.where( cat == x )[ 0 ][ 0 ] for x in train_y[ :, task ] ]
test_y[ :, task ] = [ np.where( cat == x )[ 0 ][ 0 ] for x in test_y[ :, task ] ]
run_filter_sizes = []
run_num_filters = []
for k in range( filter_sets ):
run_filter_sizes.append( filter_sizes + k )
run_num_filters.append( num_filters )
ret = run_cnn(
gParameters,
train_x, train_y, test_x, test_y,
learning_rate = learning_rate,
batch_size = batch_size,
epochs = epochs,
dropout = dropout,
optimizer = optimizer,
wv_len = wv_len,
filter_sizes = run_filter_sizes,
num_filters = run_num_filters,
emb_l2 = emb_l2,
w_l2 = w_l2
)
return ret
def main():
gParameters = initialize_parameters()
avg_loss = run(gParameters)
print( "Return: ", avg_loss )
if __name__ == '__main__':
main()
try:
K.clear_session()
except AttributeError: # theano does not have this function
pass
| mit |
cdeil/astrometric_checks | test_astrometric.py | 1 | 8356 | """Tests for the Astropy Astrometric class against the HESS software.
http://docs.astropy.org/en/latest/coordinates/matchsep.html?highlight=astrometric%20frame#astrometric-frames
https://github.com/astropy/astropy/pull/4909
https://github.com/astropy/astropy/issues/4931
https://github.com/astropy/astropy/pull/4941
"""
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.coordinates import SkyCoord, Angle
def copy_test_event_list():
"""Copy HESS event list as test data file.
This isn't public data, so we cut out the photons from the Crab nebula.
"""
filename = '/Users/deil/work/hess-host-analyses/checks/pointing_check/run_0018406_std_fullEnclosure_eventlist.fits'
hdu_list = fits.open(filename)
table = hdu_list['EVENTS']
source_pos = SkyCoord(table.header['RA_OBJ'], table.header['DEC_OBJ'], unit='deg')
event_pos = SkyCoord(table.data['RA'], table.data['DEC'], unit='deg')
sep = source_pos.separation(event_pos)
mask = (sep > Angle(0.3, 'deg'))
hdu_list['EVENTS'] = fits.BinTableHDU(header=table.header, data=table.data[mask], name='EVENTS')
filename = 'hess_event_list.fits'
print('Writing {}'.format(filename))
hdu_list.writeto(filename, clobber=True)
def add_astropy_radec_coordinates(table):
"""Test FOV RADEC coordinate transformations.
"""
# Set up test data and astrometric frame (a.k.a. FOV frame)
# centered on the telescope pointing position
center = SkyCoord(table.meta['RA_PNT'], table.meta['DEC_PNT'], unit='deg')
aframe = center.skyoffset_frame()
# Transform: RADEC -> FOV_RADEC
event_radec = SkyCoord(table['RA'], table['DEC'], unit='deg')
event_fov = event_radec.transform_to(aframe)
# event_fov = event_radec.spherical_offsets_to(center)
table['FOV_RADEC_LON_ASTROPY'] = event_fov.data.lon.wrap_at('180 deg').to('deg')
table['FOV_RADEC_LAT_ASTROPY'] = event_fov.data.lat.to('deg')
table['FOV_RADEC_LON_DIFF'] = Angle(table['FOV_RADEC_LON_ASTROPY'] - table['FOV_RADEC_LON'], 'deg').to('arcsec')
table['FOV_RADEC_LAT_DIFF'] = Angle(table['FOV_RADEC_LAT_ASTROPY'] - table['FOV_RADEC_LAT'], 'deg').to('arcsec')
# Transform: FOV_RADEC -> RADEC #
event_fov = SkyCoord(table['FOV_RADEC_LON_ASTROPY'], table['FOV_RADEC_LAT_ASTROPY'], unit='deg', frame=aframe)
event_radec = event_fov.transform_to('icrs')
table['RA_ASTROPY'] = event_radec.data.lon.to('deg')
table['DEC_ASTROPY'] = event_radec.data.lat.to('deg')
table['RA_DIFF'] = Angle(table['RA_ASTROPY'] - table['RA'], 'deg').to('arcsec')
table['DEC_DIFF'] = Angle(table['DEC_ASTROPY'] - table['DEC'], 'deg').to('arcsec')
# Check results
# table.info('stats')
"""
FOV_RADEC_LON_ASTROPY 0.0443400478952 1.58878237603 -35.1578248604 21.7907000503
FOV_RADEC_LAT_ASTROPY -0.0177905539829 1.57634999964 -28.7981936822 17.18667566
FOV_RADEC_LON_DIFF -2.07926024446 0.792609733345 -8.00939051063 6.52532690282
FOV_RADEC_LAT_DIFF -16.225735491 0.608440988983 -24.6793550923 -6.86899057409
RA_ASTROPY 83.6820322695 1.72309019655 52.9889995666 106.496595676
DEC_ASTROPY 24.4867638715 1.58698917572 -8.10738650931 41.7000435121
RA_DIFF -0.00228730168484 0.00720260391234 -0.0173224899129 0.0125271212539
DEC_DIFF -0.000683806319612 0.00343228179454 -0.0148859721222 0.0125804299074
Conclusions:
* currently results for RADEC -> FOV_RADEC are off by this much for unknown reasons:
-8 to +6 arcsec in LON
-24 to -6 arcsec in LAT
* the Astropy transformation does roundtrip with this accuracy
* good enough for us ... won't investigate further.
* could be limited by float32 and switching to float64 would improve accuracy?
* 0.01 arcsec in LON and LAT
"""
# import IPython; IPython.embed()
return table
def add_astropy_altaz_coordinates(table):
"""Test FOV RADEC coordinate transformations.
"""
# Set up test data and astrometric frame (a.k.a. FOV frame)
# centered on the telescope pointing position
from gammapy.data import EventList
event_list = EventList(table)
# print(event_list)
center = event_list.pointing_radec.transform_to(event_list.altaz.frame)
# import IPython; IPython.embed()
# center = SkyCoord(table.meta['RA_PNT'], table.meta['DEC_PNT'], unit='deg')
aframe = center.skyoffset_frame()
#
# # Transform: RADEC -> FOV_RADEC
# event_radec = SkyCoord(table['RA'], table['DEC'], unit='deg')
event_pos = event_list.altaz
event_fov = event_pos.transform_to(aframe)
# event_fov = event_radec.spherical_offsets_to(center)
table['FOV_ALTAZ_LON_ASTROPY'] = event_fov.data.lon.wrap_at('180 deg').to('deg')
table['FOV_ALTAZ_LAT_ASTROPY'] = event_fov.data.lat.to('deg')
table['FOV_ALTAZ_LON_DIFF'] = Angle(table['FOV_ALTAZ_LON_ASTROPY'] - table['FOV_ALTAZ_LON'], 'deg').to('arcsec')
table['FOV_ALTAZ_LAT_DIFF'] = Angle(table['FOV_ALTAZ_LAT_ASTROPY'] - table['FOV_ALTAZ_LAT'], 'deg').to('arcsec')
return table
def plot_fov_radec():
"""Make plots to illustrate the RADEC FOV trafo differences."""
import matplotlib.pyplot as plt
def test_fov_altaz():
"""Test FOV ALTAZ coordinate transformations.
"""
# Set up test data and astrometric frame (a.k.a. FOV frame)
# centered on the telescope pointing position
table = Table.read('hess_event_list.fits')
center = SkyCoord(table.meta['RA_PNT'], table.meta['DEC_PNT'], unit='deg')
aframe = center.astrometric_frame()
# TODO: this is more tricky, because AZ_PNT and ALT_PNT is changing with time.
# We first have to get full agreement with the normal ALTAZ to RADEC trafo
# before debugging the ALTAZ FOV event coordinates.
# import IPython; IPython.embed()
def test_separations():
"""Test if sky separations are the same in all spherical coordinate systems.
This is a simple consistency check.
Sky separations computed between consecutive event positions should
be the same in any spherical coordinate system.
"""
table = Table.read('hess_event_list_2.fits')
def separation(table, lon_colname, lat_colname):
lon = np.array(table[lon_colname], dtype=np.float64)
lat = np.array(table[lat_colname], dtype=np.float64)
pos1 = SkyCoord(lon[:1], lat[:1], unit='deg')
pos2 = SkyCoord(lon[1:], lat[1:], unit='deg')
sep = pos1.separation(pos2).arcsec
res = np.empty(len(table), dtype=np.float64)
res[:-1] = sep
res[-1] = np.nan
return res
table['SEP_RADEC'] = separation(table, 'RA', 'DEC')
table['SEP_RADEC_FOV'] = separation(table, 'FOV_RADEC_LON', 'FOV_RADEC_LAT')
table['SEP_RADEC_FOV_MINUS_SEP_RADEC'] = table['SEP_RADEC_FOV'] - table['SEP_RADEC']
print('Max separation difference RADEC_FOV to RADEC: {} arcsec'.format(np.nanmax(table['SEP_RADEC_FOV_MINUS_SEP_RADEC'])))
# TODO: this currently gives 14.9 arcsec, i.e. there's an issue!
table['SEP_RADEC_FOV_ASTROPY'] = separation(table, 'FOV_RADEC_LON_ASTROPY', 'FOV_RADEC_LAT_ASTROPY')
table['SEP_RADEC_FOV_ASTROPY_MINUS_SEP_RADEC'] = table['SEP_RADEC_FOV_ASTROPY'] - table['SEP_RADEC']
print('Max separation difference RADEC_FOV_ASTROPY to RADEC: {} arcsec'.format(np.nanmax(table['SEP_RADEC_FOV_ASTROPY_MINUS_SEP_RADEC'])))
# 0.02 arcsec => OK
# Note: for ALTAZ this is not expected to match RADEC, because the earth is rotating between events.
# table['SEP_ALTAZ'] = separation(table, 'AZ', 'ALT')
# table['SEP_RADEC_MINUS_SEP_ALTAZ'] = table['SEP_RADEC'] - table['SEP_ALTAZ']
# print('Max separation difference RADEC to ALTAZ: {}'.format(np.nanmax(table['SEP_RADEC_MINUS_SEP_ALTAZ'])))
# table.info('stats')
# table.write('temp.fits', overwrite=True)
if __name__ == '__main__':
# copy_test_event_list()
table = Table.read('hess_event_list.fits', hdu='EVENTS')
table2 = add_astropy_radec_coordinates(table)
table3 = add_astropy_altaz_coordinates(table2)
table3.info('stats')
filename = 'hess_event_list_3.fits'
print('Writing {}'.format(filename))
table3.write(filename, overwrite=True)
# test_separations()
# test_fov_altaz()
| mit |
xlhtc007/blaze | blaze/compute/tests/test_numpy_compute.py | 6 | 16540 | from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze import sin
from odo import into
from datashape import discover, to_numpy, dshape
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])
t = symbol('t', discover(x))
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
return c.all()
return c
def test_symbol():
assert eq(compute(t, x), x)
def test_eq():
assert eq(compute(t['amount'] == 100, x),
x['amount'] == 100)
def test_selection():
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
def test_arithmetic():
assert eq(compute(t['amount'] + t['id'], x),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], x),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], x),
x['amount'] % x['id'])
def test_UnaryOp():
assert eq(compute(exp(t['amount']), x),
np.exp(x['amount']))
assert eq(compute(abs(-t['amount']), x),
abs(-x['amount']))
def test_Neg():
assert eq(compute(-t['amount'], x),
-x['amount'])
def test_invert_not():
assert eq(compute(~(t.amount > 0), x),
~(x['amount'] > 0))
def test_Reductions():
assert compute(t['amount'].mean(), x) == x['amount'].mean()
assert compute(t['amount'].count(), x) == len(x['amount'])
assert compute(t['amount'].sum(), x) == x['amount'].sum()
assert compute(t['amount'].min(), x) == x['amount'].min()
assert compute(t['amount'].max(), x) == x['amount'].max()
assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
assert compute(t['amount'].var(), x) == x['amount'].var()
assert compute(t['amount'].std(), x) == x['amount'].std()
assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
assert compute((t['amount'] > 150).any(), x) == True
assert compute((t['amount'] > 250).all(), x) == False
assert compute(t['amount'][0], x) == x['amount'][0]
assert compute(t['amount'][-1], x) == x['amount'][-1]
def test_count_string():
s = symbol('name', 'var * ?string')
x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
assert compute(s.count(), x) == 4
def test_reductions_on_recarray():
assert compute(t.count(), x) == len(x)
def test_count_nan():
t = symbol('t', '3 * ?real')
x = np.array([1.0, np.nan, 2.0])
assert compute(t.count(), x) == 2
def test_distinct():
x = np.array([('Alice', 100),
('Alice', -200),
('Bob', 100),
('Bob', 100)],
dtype=[('name', 'S5'), ('amount', 'i8')])
t = symbol('t', 'var * {name: string, amount: int64}')
assert eq(compute(t['name'].distinct(), x),
np.unique(x['name']))
assert eq(compute(t.distinct(), x),
np.unique(x))
def test_distinct_on_recarray():
rec = pd.DataFrame(
[[0, 1],
[0, 2],
[1, 1],
[1, 2]],
columns=('a', 'b'),
).to_records(index=False)
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[[0, 1],
[1, 1]],
columns=('a', 'b'),
).to_records(index=False)
).all()
def test_distinct_on_structured_array():
arr = np.array(
[(0., 1.),
(0., 2.),
(1., 1.),
(1., 2.)],
dtype=[('a', 'f4'), ('b', 'f4')],
)
s = symbol('s', discover(arr))
assert(
compute(s.distinct('a'), arr) ==
np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
).all()
def test_distinct_on_str():
rec = pd.DataFrame(
[['a', 'a'],
['a', 'b'],
['b', 'a'],
['b', 'b']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[['a', 'a'],
['b', 'a']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
).all()
def test_sort():
assert eq(compute(t.sort('amount'), x),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), x),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), x),
np.sort(x, order=['amount', 'id']))
assert eq(compute(t.amount.sort(), x),
np.sort(x['amount']))
def test_head():
assert eq(compute(t.head(2), x),
x[:2])
def test_tail():
assert eq(compute(t.tail(2), x),
x[-2:])
def test_label():
expected = x['amount'] * 10
expected = np.array(expected, dtype=[('foo', 'i8')])
assert eq(compute((t['amount'] * 10).label('foo'), x),
expected)
def test_relabel():
expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)
assert result.dtype.names == expected.dtype.names
assert eq(result, expected)
def test_by():
expr = by(t.amount > 0, count=t.id.count())
result = compute(expr, x)
assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])
def test_compute_up_field():
assert eq(compute(t['name'], x), x['name'])
def test_compute_up_projection():
assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = symbol('a', discover(ax))
def test_slice():
inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
(0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
(0, [1, 2])]
for s in inds:
assert (compute(a[s], ax) == ax[s]).all()
def test_array_reductions():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))
def test_array_reductions_with_keepdims():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
ax.sum(axis=axis, keepdims=True))
def test_summary_on_ndarray():
assert compute(summary(total=a.sum(), min=a.min()), ax) == \
(ax.min(), ax.sum())
result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
expected = np.array([(ax.min(), ax.sum())],
dtype=[('min', 'float32'), ('total', 'float64')])
assert result.ndim == ax.ndim
assert eq(expected, result)
def test_summary_on_ndarray_with_axis():
for axis in [0, 1, (1, 0)]:
expr = summary(total=a.sum(), min=a.min(), axis=axis)
result = compute(expr, ax)
shape, dtype = to_numpy(expr.dshape)
expected = np.empty(shape=shape, dtype=dtype)
expected['total'] = ax.sum(axis=axis)
expected['min'] = ax.min(axis=axis)
assert eq(result, expected)
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
data = np.array([0, 1])
expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
dtype='M8[us]')
assert eq(compute(t.utcfromtimestamp, data), expected)
def test_nelements_structured_array():
assert compute(t.nelements(), x) == len(x)
assert compute(t.nelements(keepdims=True), x) == (len(x),)
def test_nelements_array():
t = symbol('t', '5 * 4 * 3 * float64')
x = np.random.randn(*t.shape)
result = compute(t.nelements(axis=(0, 1)), x)
np.testing.assert_array_equal(result, np.array([20, 20, 20]))
result = compute(t.nelements(axis=1), x)
np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))
def test_nrows():
assert compute(t.nrows, x) == len(x)
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
dtype='M8[us]')
s = symbol('s', 'var * datetime')
def test_datetime_truncation():
assert eq(compute(s.truncate(1, 'day'), dts),
dts.astype('M8[D]'))
assert eq(compute(s.truncate(2, 'seconds'), dts),
np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
dtype='M8[s]'))
assert eq(compute(s.truncate(2, 'weeks'), dts),
np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))
assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7
def test_hour():
dts = [datetime(2000, 6, 20, 1, 00, 00),
datetime(2000, 6, 20, 12, 59, 59),
datetime(2000, 6, 20, 12, 00, 00),
datetime(2000, 6, 20, 11, 59, 59)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'hour'), dts),
into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 11, 0)]))
def test_month():
dts = [datetime(2000, 7, 1),
datetime(2000, 6, 30),
datetime(2000, 6, 1),
datetime(2000, 5, 31)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'month'), dts),
into(np.ndarray, [date(2000, 7, 1),
date(2000, 6, 1),
date(2000, 6, 1),
date(2000, 5, 1)]))
def test_truncate_on_np_datetime64_scalar():
s = symbol('s', 'datetime')
data = np.datetime64('2000-01-02T12:30:00Z')
assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')
def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
s = symbol('s', 'datetime')
n = np.datetime64('2014-11-11')
p = datetime(2014, 11, 11)
expr = s.truncate(1, 'week')
assert compute(expr, n) == compute(expr, p)
def test_add_multiple_ndarrays():
a = symbol('a', '5 * 4 * int64')
b = symbol('b', '5 * 4 * float32')
x = np.arange(9, dtype='int64').reshape(3, 3)
y = (x + 1).astype('float32')
expr = sin(a) + 2 * b
scope = {a: x, b: y}
expected = sin(x) + 2 * y
# check that we cast correctly
assert expr.dshape == dshape('5 * 4 * float64')
np.testing.assert_array_equal(compute(expr, scope), expected)
np.testing.assert_array_equal(compute(expr, scope, optimize=False),
expected)
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))
def test_transpose():
assert eq(compute(A.T, nA), nA.T)
assert eq(compute(A.transpose((0, 1)), nA), nA)
def test_dot():
assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))
def test_subexpr_datetime():
data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
s = symbol('s', discover(data))
result = compute(s.truncate(days=2).day, data)
expected = np.array([31, 2, 2, 4])
np.testing.assert_array_equal(result, expected)
def test_mixed_types():
x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
[(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
[(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
[(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
[(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
dtype=[('count', '<i4'), ('total', '<i8')])
aggregate = symbol('aggregate', discover(x))
result = compute(aggregate.total.sum(axis=(0,)) /
aggregate['count'].sum(axis=(0,)), x)
expected = (x['total'].sum(axis=0, keepdims=True) /
x['count'].sum(axis=0, keepdims=True)).squeeze()
np.testing.assert_array_equal(result, expected)
def test_broadcast_compute_against_numbers_and_arrays():
A = symbol('A', '5 * float32')
a = symbol('a', 'float32')
b = symbol('b', 'float32')
x = np.arange(5, dtype='f4')
expr = Broadcast((A, b), (a, b), a + b)
result = compute(expr, {A: x, b: 10})
assert eq(result, x + 10)
def test_map():
pytest.importorskip('numba')
a = np.arange(10.0)
f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
x = symbol('x', discover(a))
expr = x.map(f, 'float64')
result = compute(expr, a)
expected = f(a)
# make sure we're not going to pandas here
assert type(result) == np.ndarray
assert type(result) == type(expected)
np.testing.assert_array_equal(result, expected)
def test_vector_norm():
x = np.arange(30).reshape((5, 6))
s = symbol('x', discover(x))
assert eq(compute(s.vnorm(), x),
np.linalg.norm(x))
assert eq(compute(s.vnorm(ord=1), x),
np.linalg.norm(x.flatten(), ord=1))
assert eq(compute(s.vnorm(ord=4, axis=0), x),
np.linalg.norm(x, ord=4, axis=0))
expr = s.vnorm(ord=4, axis=0, keepdims=True)
assert expr.shape == compute(expr, x).shape
def test_join():
cities = np.array([('Alice', 'NYC'),
('Alice', 'LA'),
('Bob', 'Chicago')],
dtype=[('name', 'S7'), ('city', 'O')])
c = symbol('cities', discover(cities))
expr = join(t, c, 'name')
result = compute(expr, {t: x, c: cities})
assert (b'Alice', 1, 100, 'LA') in into(list, result)
def test_query_with_strings():
b = np.array([('a', 1), ('b', 2), ('c', 3)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]
@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
result = compute(s.x.isin(keys), b)
expected = np.in1d(b['x'], keys)
np.testing.assert_array_equal(result, expected)
def test_nunique_recarray():
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
('a', 1), ('b', 2)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
expr = s.nunique()
assert compute(expr, b) == len(np.unique(b))
def test_str_repeat():
a = np.array(('a', 'b', 'c'))
s = symbol('s', discover(a))
expr = s.repeat(3)
assert all(compute(expr, a) == np.char.multiply(a, 3))
def test_str_interp():
a = np.array(('%s', '%s', '%s'))
s = symbol('s', discover(a))
expr = s.interp(1)
assert all(compute(expr, a) == np.char.mod(a, 1))
def test_timedelta_arith():
dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
delta = np.timedelta64(1, 'D')
sym = symbol('s', discover(dates))
assert (compute(sym + delta, dates) == dates + delta).all()
assert (compute(sym - delta, dates) == dates - delta).all()
def test_coerce():
x = np.arange(1, 3)
s = symbol('s', discover(x))
np.testing.assert_array_equal(compute(s.coerce('float64'), x),
np.arange(1.0, 3.0))
def test_concat_arr():
s_data = np.arange(15)
t_data = np.arange(15, 30)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30)
).all()
def test_concat_mat():
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30).reshape(10, 3)
).all()
assert (
compute(concat(s, t, axis=1), {s: s_data, t: t_data}) ==
np.concatenate((s_data, t_data), axis=1)
).all()
| bsd-3-clause |
SkRobo/Eurobot-2017 | old year/RESET-master/prob_motion_model.py | 2 | 3393 |
"""
Sampling algorithm for Probabilistic Odometry Holonomic Motion Model
Ref: Probabilistic Robotics, ch 5.4, pp 136
http://ais.informatik.uni-freiburg.de/teaching/ss11/robotics/slides/06-motion-models.pdf
"""
import math
import random
import matplotlib.pyplot as plt
x, y, tetha = 0, 0, 0 # t-1 position in global coord sys
x_bar, y_bar, tetha_bar = 2, 3, 0 # t-1 odometry position in local coord sys
x_bar_prime, y_bar_prime, tetha_bar_prime = 3, 3.5, math.pi/3 # t odometry position in local coord sys
alpha1, alpha2, alpha3 = 0.1,0.1,0.05 # robot error parameters
x_plot = []
y_plot = []
tetha_plot = []
tetha_dummy = []
def prob3(x, y, tetha, x_bar, y_bar, tetha_bar, x_bar_prime, y_bar_prime, tetha_bar_prime):
"""Calculates the final robor position in global coord sys"""
delta_x = x_bar_prime - x_bar
delta_y = y_bar_prime - y_bar
delta_rot = tetha_bar_prime - tetha_bar
edelta_x = delta_x - sample(alpha1*math.fabs(delta_x) + alpha2*math.fabs(delta_y)/3 + alpha3*math.fabs(delta_rot)/3)
edelta_y = delta_y - sample(alpha1*math.fabs(delta_x)/3 + alpha2*math.fabs(delta_y) + alpha3*math.fabs(delta_rot)/3)
edelta_rot = delta_rot - sample(alpha1*math.fabs(delta_x)/3 + alpha2*math.fabs(delta_y)/3 + alpha3*math.fabs(delta_rot))
x_prime = x + edelta_x
y_prime = y + edelta_y
tetha_prime = tetha + edelta_rot
return x_prime, y_prime, tetha_prime
def prob2(x, y, tetha, x_bar, y_bar, tetha_bar, x_bar_prime, y_bar_prime, tetha_bar_prime):
"""Calculates the final robor position in global coord sys"""
delta_trans = math.sqrt(math.pow(x_bar - x_bar_prime, 2) + math.pow(y_bar - y_bar_prime, 2))
delta_rot = tetha_bar_prime - tetha_bar
delta_angle = math.atan2(y_bar_prime - y_bar, x_bar_prime - x_bar)
edelta_trans = delta_trans - sample(alpha1*math.fabs(delta_trans) + alpha3*math.fabs(delta_rot)/3)
edelta_rot = delta_rot - sample(alpha1*math.fabs(delta_trans)/3 + alpha3*math.fabs(delta_rot))
edelta_angle = delta_angle - sample(alpha1*math.fabs(delta_trans)/4) # dividing by 4 gives same 1D result as prob
x_prime = x + edelta_trans*math.cos(edelta_angle)
y_prime = y + edelta_trans*math.sin(edelta_angle)
tetha_prime = tetha + edelta_rot
return x_prime, y_prime, tetha_prime
def prob(pose, delta):
# From robot I get relative position. And I can do new relative minus
# old relative to get displacement dx, dy, dtheta
if delta[2] == 0:
x_prime = pose[0] + delta[0] + trans_noise()
y_prime = pose[1] + delta[1] + trans_noise()
theta_prime = pose[2] + theta_noise()
    else:
        # x_prime = pose[0] + delta[0] + noise
        # y_prime = pose[1] + delta[1] + noise
        # theta_prime = pose[2] + delta[2] + noise + drift  # pp 3
        # The rotational branch above was never finished; raise explicitly
        # instead of failing later with an UnboundLocalError at the return.
        raise NotImplementedError('rotational motion (delta[2] != 0) is '
                                  'not implemented')
return x_prime, y_prime, theta_prime
def trans_noise():
return random.gauss(0, 10)
def theta_noise():
return random.gauss(0, 0.1)
def sample(b):
"""Draws a sample from the normal distribution"""
#b = math.sqrt(b2)
r = 0
    for _ in range(12):  # 12 uniform draws approximate a Gaussian (CLT)
r += random.uniform(-b, b)
return 0.5*r
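# A hedged aside (not part of the original algorithm): summing 12 uniform
# draws as above is the classic approximation of a Gaussian.  Each U(-b, b)
# draw has variance b**2 / 3, so the sum of 12 has variance 4 * b**2 and
# halving the result leaves variance b**2, i.e. a standard deviation of b.
# An equivalent sketch using a true Gaussian draw would therefore be:
def sample_gauss(b):
    """Approximate drop-in replacement for sample() using random.gauss."""
    return random.gauss(0, b)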
if __name__ == '__main__':
for i in range(500):
"""Construct probable poositions"""
x_temp, y_temp, tetha_temp = prob2(x, y, tetha, x_bar, y_bar, tetha_bar, x_bar_prime, y_bar_prime, tetha_bar_prime)
x_plot.append(x_temp)
y_plot.append(y_temp)
tetha_plot.append(tetha_temp)
tetha_dummy.append(0)
#plt.plot(tetha_plot, tetha_dummy, 'ro')
plt.plot(x_plot, y_plot, 'ro')
plt.axis([0, 5, 0, 5])
plt.show()
| mit |
hsuantien/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components via set_params raises an error on
    # subsequent partial_fit calls.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
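# Hedged illustration (not one of the original tests): the streaming pattern
# exercised above -- feeding IncrementalPCA one chunk of rows at a time via
# partial_fit -- can be sketched as a small helper.  The name and defaults
# below are hypothetical; each chunk is assumed to contain at least
# n_components rows.
def example_streaming_ipca(X, n_components=2, chunk_size=10):
    """Fit an IncrementalPCA by iterating over row chunks of X."""
    ipca = IncrementalPCA(n_components=n_components)
    for start in range(0, X.shape[0], chunk_size):
        ipca.partial_fit(X[start:start + chunk_size])
    return ipca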
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
ARudiuk/mne-python | mne/io/base.py | 1 | 97091 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
# Denis Engemann <[email protected]>
# Teon Brooks <[email protected]>
# Marijn van Vliet <[email protected]>
#
# License: BSD (3-clause)
import copy
from copy import deepcopy
import os
import os.path as op
import numpy as np
from scipy import linalg
from .constants import FIFF
from .pick import pick_types, channel_type, pick_channels, pick_info
from .pick import _pick_data_channels, _pick_data_or_ica
from .meas_info import write_meas_info
from .proj import setup_proj, activate_proj, _proj_equal, ProjMixin
from ..channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from ..channels.montage import read_montage, _set_montage, Montage
from .compensator import set_current_comp
from .write import (start_file, end_file, start_block, end_block,
write_dau_pack16, write_float, write_double,
write_complex64, write_complex128, write_int,
write_id, write_string, write_name_list, _get_split_size)
from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
notch_filter, band_stop_filter, resample,
_resample_stim_channels)
from ..fixes import in1d
from ..parallel import parallel_func
from ..utils import (_check_fname, _check_pandas_installed,
_check_pandas_index_arguments, _check_copy_dep,
check_fname, _get_stim_channel, object_hash,
logger, verbose, _time_mask, warn)
from ..viz import plot_raw, plot_raw_psd, plot_raw_psd_topo
from ..defaults import _handle_default
from ..externals.six import string_types
from ..event import find_events, concatenate_events
from ..annotations import _combine_annotations, _onset_to_seconds
class ToDataFrameMixin(object):
"""Class to add to_data_frame capabilities to certain classes."""
def _get_check_picks(self, picks, picks_check):
if picks is None:
picks = list(range(self.info['nchan']))
else:
if not in1d(picks, np.arange(len(picks_check))).all():
raise ValueError('At least one picked channel is not present '
'in this object instance.')
return picks
def to_data_frame(self, picks=None, index=None, scale_time=1e3,
scalings=None, copy=True, start=None, stop=None):
"""Export data in tabular structure as a pandas DataFrame.
Columns and indices will depend on the object being converted.
Generally this will include as much relevant information as
possible for the data type being converted. This makes it easy
to convert data for use in packages that utilize dataframes,
such as statsmodels or seaborn.
Parameters
----------
picks : array-like of int | None
If None only MEG and EEG channels are kept
otherwise the channels indices in picks are kept.
index : tuple of str | None
Column to be used as index for the data. Valid string options
are 'epoch', 'time' and 'condition'. If None, all three info
            columns will be included in the table as categorical data.
scale_time : float
Scaling to be applied to time units.
scalings : dict | None
Scaling to be applied to the channels picked. If None, defaults to
``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
copy : bool
If true, data will be copied. Else data may be modified in place.
start : int | None
If it is a Raw object, this defines a starting index for creating
the dataframe from a slice. The times will be interpolated from the
index and the sampling rate of the signal.
stop : int | None
If it is a Raw object, this defines a stop index for creating
the dataframe from a slice. The times will be interpolated from the
index and the sampling rate of the signal.
Returns
-------
df : instance of pandas.core.DataFrame
A dataframe suitable for usage with other
statistical/plotting/analysis packages. Column/Index values will
depend on the object type being converted, but should be
human-readable.
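        Notes
        -----
        As a minimal, illustrative sketch (``inst`` stands for an existing
        Raw, Epochs, Evoked, or SourceEstimate instance; it is not defined
        here)::
            >>> df = inst.to_data_frame(index=['time'])  # doctest: +SKIP
            >>> df.head()  # doctest: +SKIP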
"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
from ..source_estimate import _BaseSourceEstimate
pd = _check_pandas_installed()
mindex = list()
# Treat SourceEstimates special because they don't have the same info
if isinstance(self, _BaseSourceEstimate):
if self.subject is None:
default_index = ['time']
else:
default_index = ['subject', 'time']
data = self.data.T
times = self.times
shape = data.shape
mindex.append(('subject', np.repeat(self.subject, shape[0])))
if isinstance(self.vertices, list):
# surface source estimates
col_names = [i for e in [
['{0} {1}'.format('LH' if ii < 1 else 'RH', vert)
for vert in vertno]
for ii, vertno in enumerate(self.vertices)]
for i in e]
else:
# volume source estimates
col_names = ['VOL {0}'.format(vert) for vert in self.vertices]
elif isinstance(self, (_BaseEpochs, _BaseRaw, Evoked)):
picks = self._get_check_picks(picks, self.ch_names)
if isinstance(self, _BaseEpochs):
default_index = ['condition', 'epoch', 'time']
data = self.get_data()[:, picks, :]
times = self.times
n_epochs, n_picks, n_times = data.shape
data = np.hstack(data).T # (time*epochs) x signals
# Multi-index creation
times = np.tile(times, n_epochs)
id_swapped = dict((v, k) for k, v in self.event_id.items())
names = [id_swapped[k] for k in self.events[:, 2]]
mindex.append(('condition', np.repeat(names, n_times)))
mindex.append(('epoch',
np.repeat(np.arange(n_epochs), n_times)))
col_names = [self.ch_names[k] for k in picks]
elif isinstance(self, (_BaseRaw, Evoked)):
default_index = ['time']
if isinstance(self, _BaseRaw):
data, times = self[picks, start:stop]
elif isinstance(self, Evoked):
data = self.data[picks, :]
times = self.times
n_picks, n_times = data.shape
data = data.T
col_names = [self.ch_names[k] for k in picks]
types = [channel_type(self.info, idx) for idx in picks]
n_channel_types = 0
ch_types_used = []
scalings = _handle_default('scalings', scalings)
for t in scalings.keys():
if t in types:
n_channel_types += 1
ch_types_used.append(t)
for t in ch_types_used:
scaling = scalings[t]
idx = [picks[i] for i in range(len(picks)) if types[i] == t]
if len(idx) > 0:
data[:, idx] *= scaling
else:
# In case some other object gets this mixin w/o an explicit check
raise NameError('Object must be one of Raw, Epochs, Evoked, or ' +
'SourceEstimate. This is {0}'.format(type(self)))
# Make sure that the time index is scaled correctly
times = np.round(times * scale_time)
mindex.append(('time', times))
if index is not None:
_check_pandas_index_arguments(index, default_index)
else:
index = default_index
if copy is True:
data = data.copy()
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
df = pd.DataFrame(data, columns=col_names)
for i, (k, v) in enumerate(mindex):
df.insert(i, k, v)
if index is not None:
if 'time' in index:
logger.info('Converting time column to int64...')
df['time'] = df['time'].astype(np.int64)
df.set_index(index, inplace=True)
if all(i in default_index for i in index):
df.columns.name = 'signal'
return df
class TimeMixin(object):
"""Class to add sfreq and time_as_index capabilities to certain classes."""
def time_as_index(self, times, use_rounding=False):
"""Convert time to indices
Parameters
----------
times : list-like | float | int
List of numbers or a number representing points in time.
use_rounding : boolean
If True, use rounding (instead of truncation) when converting
times to indices. This can help avoid non-unique indices.
Returns
-------
index : ndarray
Indices corresponding to the times supplied.
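        Notes
        -----
        As an illustrative sketch (``raw`` is assumed to be an existing Raw
        instance sampled at 1000 Hz; it is not defined here)::
            >>> raw.time_as_index([0., 0.5, 1.])  # doctest: +SKIP
            array([   0,  500, 1000])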
"""
from ..source_estimate import _BaseSourceEstimate
if isinstance(self, _BaseSourceEstimate):
sfreq = 1. / self.tstep
else:
sfreq = self.info['sfreq']
index = (np.atleast_1d(times) - self.times[0]) * sfreq
if use_rounding:
index = np.round(index)
return index.astype(int)
def _check_fun(fun, d, *args, **kwargs):
want_shape = d.shape
d = fun(d, *args, **kwargs)
if not isinstance(d, np.ndarray):
raise TypeError('Return value must be an ndarray')
if d.shape != want_shape:
raise ValueError('Return data must have shape %s not %s'
% (want_shape, d.shape))
return d
class _BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, ToDataFrameMixin,
TimeMixin):
"""Base class for Raw data
Subclasses must provide the following methods:
* _read_segment_file(self, data, idx, fi, start, stop, cals, mult)
(only needed for types that support on-demand disk reads)
The `_BaseRaw._raw_extras` list can contain whatever data is necessary for
such on-demand reads. For `RawFIF` this means a list of variables formerly
known as ``_rawdirs``.
"""
@verbose
def __init__(self, info, preload=False,
first_samps=(0,), last_samps=None,
filenames=(None,), raw_extras=(None,),
comp=None, orig_comp_grade=None, orig_format='double',
dtype=np.float64, verbose=None):
# wait until the end to preload data, but triage here
if isinstance(preload, np.ndarray):
# some functions (e.g., filtering) only work w/64-bit data
if preload.dtype not in (np.float64, np.complex128):
raise RuntimeError('datatype must be float64 or complex128, '
'not %s' % preload.dtype)
if preload.dtype != dtype:
raise ValueError('preload and dtype must match')
self._data = preload
self.preload = True
assert len(first_samps) == 1
last_samps = [first_samps[0] + self._data.shape[1] - 1]
load_from_disk = False
else:
if last_samps is None:
raise ValueError('last_samps must be given unless preload is '
'an ndarray')
if preload is False:
self.preload = False
load_from_disk = False
elif preload is not True and not isinstance(preload, string_types):
raise ValueError('bad preload: %s' % preload)
else:
load_from_disk = True
self._last_samps = np.array(last_samps)
self._first_samps = np.array(first_samps)
info._check_consistency() # make sure subclass did a good job
self.info = info
if info.get('buffer_size_sec', None) is None:
raise RuntimeError('Reader error, notify mne-python developers')
cals = np.empty(info['nchan'])
for k in range(info['nchan']):
cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
self.verbose = verbose
self._cals = cals
self._raw_extras = list(raw_extras)
self.comp = comp
self._orig_comp_grade = orig_comp_grade
self._filenames = list(filenames)
self.orig_format = orig_format
self._projectors = list()
self._projector = None
self._dtype_ = dtype
self.annotations = None
# If we have True or a string, actually do the preloading
self._update_times()
if load_from_disk:
self._preload_data(preload)
@property
def _dtype(self):
"""dtype for loading data (property so subclasses can override)"""
# most classes only store real data, they won't need anything special
return self._dtype_
def _read_segment(self, start=0, stop=None, sel=None, data_buffer=None,
projector=None, verbose=None):
"""Read a chunk of raw data
Parameters
----------
start : int, (optional)
first sample to include (first is 0). If omitted, defaults to the
first sample in data.
stop : int, (optional)
First sample to not include.
If omitted, data is included to the end.
sel : array, optional
Indices of channels to select.
data_buffer : array or str, optional
numpy array to fill with data read, must have the correct shape.
If str, a np.memmap with the correct data type will be used
to store the data.
projector : array
SSP operator to apply to the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
"""
# Initial checks
start = int(start)
stop = self.n_times if stop is None else min([int(stop), self.n_times])
if start >= stop:
raise ValueError('No data in this range')
# Initialize the data and calibration vector
n_sel_channels = self.info['nchan'] if sel is None else len(sel)
# convert sel to a slice if possible for efficiency
if sel is not None and len(sel) > 1 and np.all(np.diff(sel) == 1):
sel = slice(sel[0], sel[-1] + 1)
idx = slice(None, None, None) if sel is None else sel
data_shape = (n_sel_channels, stop - start)
dtype = self._dtype
if isinstance(data_buffer, np.ndarray):
if data_buffer.shape != data_shape:
raise ValueError('data_buffer has incorrect shape: %s != %s'
% (data_buffer.shape, data_shape))
data = data_buffer
elif isinstance(data_buffer, string_types):
# use a memmap
data = np.memmap(data_buffer, mode='w+',
dtype=dtype, shape=data_shape)
else:
data = np.zeros(data_shape, dtype=dtype)
# deal with having multiple files accessed by the raw object
cumul_lens = np.concatenate(([0], np.array(self._raw_lengths,
dtype='int')))
cumul_lens = np.cumsum(cumul_lens)
files_used = np.logical_and(np.less(start, cumul_lens[1:]),
np.greater_equal(stop - 1,
cumul_lens[:-1]))
# set up cals and mult (cals, compensation, and projector)
cals = self._cals.ravel()[np.newaxis, :]
if self.comp is not None:
if projector is not None:
mult = self.comp * cals
mult = np.dot(projector[idx], mult)
else:
mult = self.comp[idx] * cals
elif projector is not None:
mult = projector[idx] * cals
else:
mult = None
cals = cals.T[idx]
# read from necessary files
offset = 0
for fi in np.nonzero(files_used)[0]:
start_file = self._first_samps[fi]
# first iteration (only) could start in the middle somewhere
if offset == 0:
start_file += start - cumul_lens[fi]
stop_file = np.min([stop - cumul_lens[fi] + self._first_samps[fi],
self._last_samps[fi] + 1])
if start_file < self._first_samps[fi] or stop_file < start_file:
raise ValueError('Bad array indexing, could be a bug')
n_read = stop_file - start_file
this_sl = slice(offset, offset + n_read)
self._read_segment_file(data[:, this_sl], idx, fi,
int(start_file), int(stop_file),
cals, mult)
offset += n_read
return data
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file
Only needs to be implemented for readers that support
``preload=False``.
Parameters
----------
data : ndarray, shape (len(idx), stop - start + 1)
The data array. Should be modified inplace.
idx : ndarray | slice
The requested channel indices.
fi : int
The file index that must be read from.
start : int
The start sample in the given file.
stop : int
The stop sample in the given file (inclusive).
cals : ndarray, shape (len(idx), 1)
Channel calibrations (already sub-indexed).
        mult : ndarray, shape (len(idx), len(info['chs'])) | None
The compensation + projection + cals matrix, if applicable.
"""
raise NotImplementedError
def _check_bad_segment(self, start, stop, picks,
reject_by_annotation=False):
"""Function for checking if data segment is bad.
If the slice is good, returns the data in desired range.
If rejected based on annotation, returns description of the
bad segment as a string.
Parameters
----------
start : int
First sample of the slice.
stop : int
End of the slice.
picks : array of int
Channel picks.
reject_by_annotation : bool
Whether to perform rejection based on annotations.
False by default.
Returns
-------
data : array | str
Data in the desired range (good segment) or description of the bad
segment.
"""
if start < 0:
return None
if reject_by_annotation and self.annotations is not None:
annot = self.annotations
sfreq = self.info['sfreq']
onset = _onset_to_seconds(self, annot.onset)
overlaps = np.where(onset < stop / sfreq)
overlaps = np.where(onset[overlaps] + annot.duration[overlaps] >
start / sfreq)
for descr in annot.description[overlaps]:
if descr.lower().startswith('bad'):
return descr
return self[picks, start:stop][0]
@verbose
def load_data(self, verbose=None):
"""Load raw data
Parameters
----------
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : instance of Raw
The raw object with data.
Notes
-----
This function will load raw data if it was not already preloaded.
If data were already preloaded, it will do nothing.
.. versionadded:: 0.10.0
"""
if not self.preload:
self._preload_data(True)
return self
@verbose
def _preload_data(self, preload, verbose=None):
"""This function actually preloads the data"""
data_buffer = preload if isinstance(preload, (string_types,
np.ndarray)) else None
logger.info('Reading %d ... %d = %9.3f ... %9.3f secs...' %
(0, len(self.times) - 1, 0., self.times[-1]))
self._data = self._read_segment(data_buffer=data_buffer)
assert len(self._data) == self.info['nchan']
self.preload = True
self.close()
def _update_times(self):
"""Helper to update times"""
self._times = np.arange(self.n_times) / float(self.info['sfreq'])
# make it immutable
self._times.flags.writeable = False
@property
def first_samp(self):
return self._first_samps[0]
@property
def last_samp(self):
return self.first_samp + sum(self._raw_lengths) - 1
@property
def _raw_lengths(self):
return [l - f + 1 for f, l in zip(self._first_samps, self._last_samps)]
def __del__(self):
# remove file for memmap
if hasattr(self, '_data') and hasattr(self._data, 'filename'):
# First, close the file out; happens automatically on del
filename = self._data.filename
del self._data
# Now file can be removed
try:
os.remove(filename)
except OSError:
pass # ignore file that no longer exists
def __enter__(self):
""" Entering with block """
return self
def __exit__(self, exception_type, exception_val, trace):
""" Exiting with block """
try:
self.close()
except:
return exception_type, exception_val, trace
def __hash__(self):
if not self.preload:
raise RuntimeError('Cannot hash raw unless preloaded')
return object_hash(dict(info=self.info, data=self._data))
def _parse_get_set_params(self, item):
# make sure item is a tuple
if not isinstance(item, tuple): # only channel selection passed
item = (item, slice(None, None, None))
if len(item) != 2: # should be channels and time instants
raise RuntimeError("Unable to access raw data (need both channels "
"and time)")
if isinstance(item[0], slice):
start = item[0].start if item[0].start is not None else 0
nchan = self.info['nchan']
if start < 0:
start += nchan
if start < 0:
raise ValueError('start must be >= -%s' % nchan)
stop = item[0].stop if item[0].stop is not None else nchan
step = item[0].step if item[0].step is not None else 1
sel = list(range(start, stop, step))
else:
sel = item[0]
if isinstance(item[1], slice):
time_slice = item[1]
start, stop, step = (time_slice.start, time_slice.stop,
time_slice.step)
else:
item1 = item[1]
# Let's do automated type conversion to integer here
if np.array(item[1]).dtype.kind == 'i':
item1 = int(item1)
if isinstance(item1, (int, np.integer)):
start, stop, step = item1, item1 + 1, 1
else:
raise ValueError('Must pass int or slice to __getitem__')
if start is None:
start = 0
        if (step is not None) and (step != 1):
raise ValueError('step needs to be 1 : %d given' % step)
if isinstance(sel, (int, np.integer)):
sel = np.array([sel])
if sel is not None and len(sel) == 0:
raise ValueError("Empty channel list")
return sel, start, stop
def __getitem__(self, item):
"""Get raw data and times
Parameters
----------
item : tuple or array-like
See below for use cases.
Returns
-------
data : ndarray, shape (n_channels, n_times)
The raw data.
times : ndarray, shape (n_times,)
The times associated with the data.
Examples
--------
Generally raw data is accessed as::
>>> data, times = raw[picks, time_slice] # doctest: +SKIP
To get all data, you can thus do either of::
>>> data, times = raw[:] # doctest: +SKIP
Which will be equivalent to:
>>> data, times = raw[:, :] # doctest: +SKIP
To get only the good MEG data from 10-20 seconds, you could do::
>>> picks = mne.pick_types(raw.info, meg=True, exclude='bads') # doctest: +SKIP
>>> t_idx = raw.time_as_index([10., 20.]) # doctest: +SKIP
>>> data, times = raw[picks, t_idx[0]:t_idx[1]] # doctest: +SKIP
""" # noqa
sel, start, stop = self._parse_get_set_params(item)
if self.preload:
data = self._data[sel, start:stop]
else:
data = self._read_segment(start=start, stop=stop, sel=sel,
projector=self._projector,
verbose=self.verbose)
times = self.times[start:stop]
return data, times
def __setitem__(self, item, value):
"""setting raw data content with python slicing"""
_check_preload(self, 'Modifying data of Raw')
sel, start, stop = self._parse_get_set_params(item)
# set the data
self._data[sel, start:stop] = value
def anonymize(self):
"""Anonymize data
This function will remove ``raw.info['subject_info']`` if it exists.
Returns
-------
raw : instance of Raw
The raw object. Operates in place.
"""
self.info._anonymize()
return self
@verbose
def apply_function(self, fun, picks, dtype, n_jobs, *args, **kwargs):
""" Apply a function to a subset of channels.
The function "fun" is applied to the channels defined in "picks". The
data of the Raw object is modified inplace. If the function returns
a different data type (e.g. numpy.complex) it must be specified using
the dtype parameter, which causes the data type used for representing
the raw data to change.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
.. note:: If the data type changes (dtype != None), more memory is
required since the original and the converted data needs
to be stored in memory.
Parameters
----------
fun : function
A function to be applied to the channels. The first argument of
fun has to be a timeseries (numpy.ndarray). The function must
            return a numpy.ndarray with the same size as the input.
picks : array-like of int | None
Indices of channels to apply the function to. If None, all
M-EEG channels are used.
dtype : numpy.dtype
Data type to use for raw data after applying the function. If None
the data type is not modified.
        n_jobs : int
Number of jobs to run in parallel.
*args :
Additional positional arguments to pass to fun (first pos. argument
of fun is the timeseries of a channel).
**kwargs :
Keyword arguments to pass to fun. Note that if "verbose" is passed
as a member of ``kwargs``, it will be consumed and will override
the default mne-python verbose level (see mne.verbose).
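        Notes
        -----
        A hedged, illustrative sketch (``raw`` is assumed to be an existing,
        preloaded Raw instance and ``mne``/``np`` to be imported; none of
        these are defined here) that rectifies the EEG channels in place::
            >>> picks = mne.pick_types(raw.info, meg=False, eeg=True)  # doctest: +SKIP
            >>> raw.apply_function(np.abs, picks, dtype=None, n_jobs=1)  # doctest: +SKIP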
"""
_check_preload(self, 'raw.apply_function')
if picks is None:
picks = _pick_data_channels(self.info, exclude=[],
with_ref_meg=False)
if not callable(fun):
raise ValueError('fun needs to be a function')
data_in = self._data
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
if n_jobs == 1:
# modify data inplace to save memory
for idx in picks:
self._data[idx, :] = _check_fun(fun, data_in[idx, :],
*args, **kwargs)
else:
# use parallel function
parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)
data_picks_new = parallel(p_fun(fun, data_in[p], *args, **kwargs)
for p in picks)
for pp, p in enumerate(picks):
self._data[p, :] = data_picks_new[pp]
@verbose
def apply_hilbert(self, picks, envelope=False, n_jobs=1, n_fft=None,
verbose=None):
""" Compute analytic signal or envelope for a subset of channels.
If envelope=False, the analytic signal for the channels defined in
"picks" is computed and the data of the Raw object is converted to
a complex representation (the analytic signal is complex valued).
If envelope=True, the absolute value of the analytic signal for the
channels defined in "picks" is computed, resulting in the envelope
signal.
        .. warning:: Do not use ``envelope=True`` if you intend to compute
an inverse solution from the raw data. If you want to
compute the envelope in source space, use
``envelope=False`` and compute the envelope after the
inverse solution has been obtained.
.. note:: If envelope=False, more memory is required since the
                  original raw data as well as the analytic signal have to
                  be stored in memory temporarily.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
Parameters
----------
picks : array-like of int
Indices of channels to apply the function to.
envelope : bool (default: False)
Compute the envelope signal of each channel.
        n_jobs : int
Number of jobs to run in parallel.
n_fft : int > self.n_times | None
Points to use in the FFT for Hilbert transformation. The signal
will be padded with zeros before computing Hilbert, then cut back
to original length. If None, n == self.n_times.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
The analytic signal "x_a(t)" of "x(t)" is::
x_a = F^{-1}(F(x) 2U) = x + i y
where "F" is the Fourier transform, "U" the unit step function,
and "y" the Hilbert transform of "x". One usage of the analytic
signal is the computation of the envelope signal, which is given by
"e(t) = abs(x_a(t))". Due to the linearity of Hilbert transform and the
        MNE inverse solution, the envelope in source space can be obtained
by computing the analytic signal in sensor space, applying the MNE
inverse, and computing the envelope in source space.
Also note that the n_fft parameter will allow you to pad the signal
with zeros before performing the Hilbert transform. This padding
is cut off, but it may result in a slightly different result
(particularly around the edges). Use at your own risk.
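        As an illustrative sketch (``raw`` is assumed to be an existing,
        preloaded Raw instance and ``mne`` to be imported; neither is
        defined here), computing the amplitude envelope of the gradiometer
        channels in place could look like::
            >>> picks = mne.pick_types(raw.info, meg='grad')  # doctest: +SKIP
            >>> raw.apply_hilbert(picks, envelope=True)  # doctest: +SKIP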
"""
n_fft = self.n_times if n_fft is None else n_fft
if n_fft < self.n_times:
raise ValueError("n_fft must be greater than n_times")
if envelope is True:
self.apply_function(_my_hilbert, picks, None, n_jobs, n_fft,
envelope=envelope)
else:
self.apply_function(_my_hilbert, picks, np.complex64, n_jobs,
n_fft, envelope=envelope)
@verbose
def filter(self, l_freq, h_freq, picks=None, filter_length='10s',
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
method='fft', iir_params=None, verbose=None):
"""Filter a subset of channels.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels selected by ``picks``. By default the data
of the Raw object is modified inplace.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
``l_freq`` and ``h_freq`` are the frequencies below which and above
which, respectively, to filter out of the data. Thus the uses are:
* ``l_freq < h_freq``: band-pass filter
* ``l_freq > h_freq``: band-stop filter
* ``l_freq is not None and h_freq is None``: high-pass filter
* ``l_freq is None and h_freq is not None``: low-pass filter
``self.info['lowpass']`` and ``self.info['highpass']`` are only
updated with picks=None.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
Parameters
----------
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
            filtering with a filter of the specified length (in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
Not used for 'iir' filters.
l_trans_bandwidth : float
Width of the transition band at the low cut-off frequency in Hz
(high pass or cutoff 1 in bandpass). Not used if 'order' is
specified in iir_params.
h_trans_bandwidth : float
Width of the transition band at the high cut-off frequency in Hz
(low pass or cutoff 2 in bandpass). Not used if 'order' is
specified in iir_params.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
raw : instance of Raw
The raw instance with filtered data.
See Also
--------
mne.Epochs.savgol_filter
mne.io.Raw.notch_filter
mne.io.Raw.resample
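        Notes
        -----
        As a hedged illustration (``raw`` is assumed to be an existing,
        preloaded Raw instance; it is not defined here), a 1-40 Hz band-pass
        of the data channels could be requested with::
            >>> raw.filter(l_freq=1., h_freq=40.)  # doctest: +SKIP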
"""
fs = float(self.info['sfreq'])
if l_freq == 0:
l_freq = None
if h_freq is not None and h_freq > (fs / 2.):
h_freq = None
if l_freq is not None and not isinstance(l_freq, float):
l_freq = float(l_freq)
if h_freq is not None and not isinstance(h_freq, float):
h_freq = float(h_freq)
_check_preload(self, 'raw.filter')
if picks is None:
picks = _pick_data_or_ica(self.info)
# let's be safe.
if len(picks) < 1:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
# update info if filter is applied to all data channels,
# and it's not a band-stop filter
if h_freq is not None:
if (l_freq is None or l_freq < h_freq) and \
(self.info["lowpass"] is None or
h_freq < self.info['lowpass']):
self.info['lowpass'] = h_freq
if l_freq is not None:
if (h_freq is None or l_freq < h_freq) and \
(self.info["highpass"] is None or
l_freq > self.info['highpass']):
self.info['highpass'] = l_freq
else:
if h_freq is not None or l_freq is not None:
logger.info('Filtering a subset of channels. The highpass and '
'lowpass values in the measurement info will not '
'be updated.')
if l_freq is None and h_freq is not None:
logger.info('Low-pass filtering at %0.2g Hz' % h_freq)
low_pass_filter(self._data, fs, h_freq,
filter_length=filter_length,
trans_bandwidth=h_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
if l_freq is not None and h_freq is None:
logger.info('High-pass filtering at %0.2g Hz' % l_freq)
high_pass_filter(self._data, fs, l_freq,
filter_length=filter_length,
trans_bandwidth=l_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
if l_freq is not None and h_freq is not None:
if l_freq < h_freq:
logger.info('Band-pass filtering from %0.2g - %0.2g Hz'
% (l_freq, h_freq))
self._data = band_pass_filter(
self._data, fs, l_freq, h_freq,
filter_length=filter_length,
l_trans_bandwidth=l_trans_bandwidth,
h_trans_bandwidth=h_trans_bandwidth,
method=method, iir_params=iir_params, picks=picks,
n_jobs=n_jobs, copy=False)
else:
logger.info('Band-stop filtering from %0.2g - %0.2g Hz'
% (h_freq, l_freq))
self._data = band_stop_filter(
self._data, fs, h_freq, l_freq,
filter_length=filter_length,
l_trans_bandwidth=h_trans_bandwidth,
h_trans_bandwidth=l_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
return self
@verbose
def notch_filter(self, freqs, picks=None, filter_length='10s',
notch_widths=None, trans_bandwidth=1.0, n_jobs=1,
method='fft', iir_params=None, mt_bandwidth=None,
p_value=0.05, verbose=None):
"""Notch filter a subset of channels.
Applies a zero-phase notch filter to the channels selected by
"picks". By default the data of the Raw object is modified inplace.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
.. note:: If n_jobs > 1, more memory is required as
``len(picks) * n_times`` additional time points need to
                  be temporarily stored in memory.
Parameters
----------
freqs : float | array of float | None
Specific frequencies to filter out from data, e.g.,
np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in
Europe. None can only be used with the mode 'spectrum_fit',
where an F test is used to find sinusoidal components.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
            filtering with a filter of the specified length (in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
Not used for 'iir' filters.
notch_widths : float | array of float | None
Width of each stop band (centred at each freq in freqs) in Hz.
If None, freqs / 200 is used.
trans_bandwidth : float
Width of the transition band in Hz.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt). 'spectrum_fit' will
use multi-taper estimation of sinusoidal components.
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'spectrum_fit' mode.
p_value : float
p-value to use in F-test thresholding to determine significant
sinusoidal components to remove when method='spectrum_fit' and
freqs=None. Note that this will be Bonferroni corrected for the
number of frequencies, so large p-values may be justified.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
raw : instance of Raw
The raw instance with filtered data.
See Also
--------
mne.io.Raw.filter
Notes
-----
For details, see :func:`mne.filter.notch_filter`.
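        As a hedged illustration (``raw`` is assumed to be an existing,
        preloaded Raw instance and ``np`` to be imported; neither is defined
        here), removing 60 Hz line noise and its harmonics could look like::
            >>> raw.notch_filter(np.arange(60, 241, 60))  # doctest: +SKIP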
"""
fs = float(self.info['sfreq'])
if picks is None:
picks = _pick_data_or_ica(self.info)
# let's be safe.
if len(picks) < 1:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
_check_preload(self, 'raw.notch_filter')
self._data = notch_filter(
self._data, fs, freqs, filter_length=filter_length,
notch_widths=notch_widths, trans_bandwidth=trans_bandwidth,
method=method, iir_params=iir_params, mt_bandwidth=mt_bandwidth,
p_value=p_value, picks=picks, n_jobs=n_jobs, copy=False)
return self
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', stim_picks=None,
n_jobs=1, events=None, copy=None, verbose=None):
"""Resample all channels.
The Raw object has to have the data loaded e.g. with ``preload=True``
or ``self.load_data()``.
.. warning:: The intended purpose of this function is primarily to
speed up computations (e.g., projection calculation) when
precise timing of events is not required, as downsampling
raw data effectively jitters trigger timings. It is
generally recommended not to epoch downsampled data,
but instead epoch and then downsample, as epoching
downsampled data jitters triggers.
For more, see
`this illustrative gist <https://gist.github.com/Eric89GXL/01642cb3789992fbca59>`_.
If resampling the continuous data is desired, it is
recommended to construct events using the original data.
The event onsets can be jointly resampled with the raw
data using the 'events' parameter.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : string or tuple
Frequency-domain window to use in resampling.
See :func:`scipy.signal.resample`.
stim_picks : array of int | None
Stim channels. These channels are simply subsampled or
supersampled (without applying any filtering). This reduces
resampling artifacts in stim channels, but may lead to missing
triggers. If None, stim channels are automatically chosen using
:func:`mne.pick_types`.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly and CUDA is initialized.
events : 2D array, shape (n_events, 3) | None
An optional event matrix. When specified, the onsets of the events
are resampled jointly with the data.
copy : bool
Whether to operate on a copy of the data (True) or modify data
in-place (False). Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
raw : instance of Raw
The resampled version of the raw object.
See Also
--------
mne.io.Raw.filter
mne.Epochs.resample
Notes
-----
For some data, it may be more accurate to use ``npad=0`` to reduce
artifacts. This is dataset dependent -- check your data!
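        As a hedged illustration (``raw`` and ``events`` are assumed to be an
        existing, preloaded Raw instance and its event matrix; neither is
        defined here), jointly resampling the data and the events to 200 Hz
        could look like::
            >>> raw, events = raw.resample(200., events=events)  # doctest: +SKIP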
""" # noqa
_check_preload(self, 'raw.resample')
inst = _check_copy_dep(self, copy)
# When no event object is supplied, some basic detection of dropped
# events is performed to generate a warning. Finding events can fail
# for a variety of reasons, e.g. if no stim channel is present or it is
# corrupted. This should not stop the resampling from working. The
# warning should simply not be generated in this case.
if events is None:
try:
original_events = find_events(inst)
except:
pass
sfreq = float(sfreq)
o_sfreq = float(inst.info['sfreq'])
offsets = np.concatenate(([0], np.cumsum(inst._raw_lengths)))
new_data = list()
ratio = sfreq / o_sfreq
# set up stim channel processing
if stim_picks is None:
stim_picks = pick_types(inst.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_picks = np.asanyarray(stim_picks)
for ri in range(len(inst._raw_lengths)):
data_chunk = inst._data[:, offsets[ri]:offsets[ri + 1]]
new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
window=window, n_jobs=n_jobs))
new_ntimes = new_data[ri].shape[1]
# In empirical testing, it was faster to resample all channels
# (above) and then replace the stim channels than it was to only
# resample the proper subset of channels and then use np.insert()
# to restore the stims.
if len(stim_picks) > 0:
stim_resampled = _resample_stim_channels(
data_chunk[stim_picks], new_data[ri].shape[1],
data_chunk.shape[1])
new_data[ri][stim_picks] = stim_resampled
inst._first_samps[ri] = int(inst._first_samps[ri] * ratio)
inst._last_samps[ri] = inst._first_samps[ri] + new_ntimes - 1
inst._raw_lengths[ri] = new_ntimes
inst._data = np.concatenate(new_data, axis=1)
inst.info['sfreq'] = sfreq
if inst.info.get('lowpass') is not None:
inst.info['lowpass'] = min(inst.info['lowpass'], sfreq / 2.)
inst._update_times()
# See the comment above why we ignore all errors here.
if events is None:
try:
                # Did we lose events?
resampled_events = find_events(inst)
if len(resampled_events) != len(original_events):
warn('Resampling of the stim channels caused event '
'information to become unreliable. Consider finding '
'events on the original data and passing the event '
'matrix as a parameter.')
except:
pass
return inst
else:
if copy:
events = events.copy()
events[:, 0] = np.minimum(
np.round(events[:, 0] * ratio).astype(int),
inst._data.shape[1]
)
return inst, events
def crop(self, tmin=0.0, tmax=None, copy=None):
"""Crop raw data file.
Limit the data from the raw file to go between specific times. Note
that the new tmin is assumed to be t=0 for all subsequently called
functions (e.g., time_as_index, or Epochs). New first_samp and
last_samp are set accordingly.
Parameters
----------
tmin : float
New start time in seconds (must be >= 0).
tmax : float | None
New end time in seconds of the data (cannot exceed data duration).
copy : bool
This parameter has been deprecated and will be removed in 0.14.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
raw : instance of Raw
The cropped raw object.
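        Notes
        -----
        As an illustrative sketch (``raw`` is assumed to be an existing Raw
        instance; it is not defined here), keeping only the 10-20 s window
        on a copy could look like::
            >>> raw_short = raw.copy().crop(tmin=10., tmax=20.)  # doctest: +SKIP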
"""
raw = _check_copy_dep(self, copy)
max_time = (raw.n_times - 1) / raw.info['sfreq']
if tmax is None:
tmax = max_time
if tmin > tmax:
raise ValueError('tmin must be less than tmax')
if tmin < 0.0:
raise ValueError('tmin must be >= 0')
elif tmax > max_time:
raise ValueError('tmax must be less than or equal to the max raw '
'time (%0.4f sec)' % max_time)
smin, smax = np.where(_time_mask(self.times, tmin, tmax,
sfreq=self.info['sfreq']))[0][[0, -1]]
cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
dtype='int')))
cumul_lens = np.cumsum(cumul_lens)
keepers = np.logical_and(np.less(smin, cumul_lens[1:]),
np.greater_equal(smax, cumul_lens[:-1]))
keepers = np.where(keepers)[0]
raw._first_samps = np.atleast_1d(raw._first_samps[keepers])
# Adjust first_samp of first used file!
raw._first_samps[0] += smin - cumul_lens[keepers[0]]
raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
raw._raw_extras = [r for ri, r in enumerate(raw._raw_extras)
if ri in keepers]
raw._filenames = [r for ri, r in enumerate(raw._filenames)
if ri in keepers]
if raw.preload:
# slice and copy to avoid the reference to large array
raw._data = raw._data[:, smin:smax + 1].copy()
raw._update_times()
return raw
@verbose
def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
drop_small_buffer=False, proj=False, fmt='single',
overwrite=False, split_size='2GB', verbose=None):
"""Save raw data to file
Parameters
----------
fname : string
File name of the new dataset. This has to be a new filename
unless data have been preloaded. Filenames should end with
raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif
or raw_tsss.fif.gz.
picks : array-like of int | None
Indices of channels to include. If None all channels are kept.
tmin : float | None
Time in seconds of first sample to save. If None first sample
is used.
tmax : float | None
Time in seconds of last sample to save. If None last sample
is used.
buffer_size_sec : float | None
Size of data chunks in seconds. If None, the buffer size of
the original file is used.
drop_small_buffer : bool
            Whether to drop the last buffer. This is required by maxfilter
            (SSS), which only accepts raw files with buffers of the same size.
proj : bool
If True the data is saved with the projections applied (active).
.. note:: If ``apply_proj()`` was used to apply the projections,
                      the projections will be active even if ``proj`` is False.
fmt : str
Format to use to save raw data. Valid options are 'double',
'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
16-bit integers, respectively. It is **strongly** recommended to
use 'single', as this is backward-compatible, and is standard for
maintaining precision. Note that using 'short' or 'int' may result
in loss of precision, complex data cannot be saved as 'short',
and neither complex data types nor real data stored as 'double'
can be loaded with the MNE command-line tools. See raw.orig_format
to determine the format the original data were stored in.
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
split_size : string | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
.. note:: Due to FIFF file limitations, the maximum split
size is 2GB.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
If Raw is a concatenation of several raw files, **be warned** that
only the measurement information from the first raw file is stored.
This likely means that certain operations with external tools may not
work properly on a saved concatenated file (e.g., probably some
or all forms of SSS). It is recommended not to concatenate and
then save raw files for this reason.
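        As a hedged illustration (``raw`` is assumed to be an existing Raw
        instance and the file name is only an example), saving to a FIF file
        could look like::
            >>> raw.save('sample_audvis_raw.fif', overwrite=True)  # doctest: +SKIP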
"""
check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
'raw.fif.gz', 'raw_sss.fif.gz',
'raw_tsss.fif.gz'))
split_size = _get_split_size(split_size)
fname = op.realpath(fname)
if not self.preload and fname in self._filenames:
raise ValueError('You cannot save data to the same file.'
' Please use a different filename.')
if self.preload:
if np.iscomplexobj(self._data):
warn('Saving raw file with complex data. Loading with '
'command-line MNE tools will not work.')
type_dict = dict(short=FIFF.FIFFT_DAU_PACK16,
int=FIFF.FIFFT_INT,
single=FIFF.FIFFT_FLOAT,
double=FIFF.FIFFT_DOUBLE)
if fmt not in type_dict.keys():
raise ValueError('fmt must be "short", "int", "single", '
'or "double"')
reset_dict = dict(short=False, int=False, single=True, double=True)
reset_range = reset_dict[fmt]
data_type = type_dict[fmt]
data_test = self[0, 0][0]
if fmt == 'short' and np.iscomplexobj(data_test):
raise ValueError('Complex data must be saved as "single" or '
'"double", not "short"')
# check for file existence
_check_fname(fname, overwrite)
if proj:
info = copy.deepcopy(self.info)
projector, info = setup_proj(info)
activate_proj(info['projs'], copy=False)
else:
info = self.info
projector = None
# set the correct compensation grade and make inverse compensator
inv_comp = None
if self.comp is not None:
inv_comp = linalg.inv(self.comp)
set_current_comp(info, self._orig_comp_grade)
#
# Set up the reading parameters
#
# Convert to samples
start = int(np.floor(tmin * self.info['sfreq']))
# "stop" is the first sample *not* to save, so we need +1's here
if tmax is None:
stop = np.inf
else:
stop = self.time_as_index(float(tmax), use_rounding=True)[0] + 1
stop = min(stop, self.last_samp - self.first_samp + 1)
buffer_size = self._get_buffer_size(buffer_size_sec)
# write the raw file
_write_raw(fname, self, info, picks, fmt, data_type, reset_range,
start, stop, buffer_size, projector, inv_comp,
drop_small_buffer, split_size, 0, None)
def plot(self, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
event_color='cyan', scalings=None, remove_dc=True, order='type',
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4, clipping=None):
"""Plot raw data
Parameters
----------
events : array | None
Events to show with vertical bars.
duration : float
Time window (sec) to plot in a given time.
start : float
Initial time to show (can be changed dynamically once plotted).
n_channels : int
Number of channels to plot at once. Defaults to 20.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to::
dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k')
bad_color : color object
Color to make bad channels.
event_color : color object
Color to use for events.
scalings : dict | None
Scaling factors for the traces. If any fields in scalings are
'auto', the scaling factor is set to match the 99.5th percentile of
a subset of the corresponding data. If scalings == 'auto', all
scalings fields are set to 'auto'. If any fields are 'auto' and
data is not preloaded, a subset of times up to 100 MB will be
loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
resp=1, chpi=1e-4)
remove_dc : bool
If True remove DC component when plotting data.
order : 'type' | 'original' | array
Order in which to plot data. 'type' groups by channel type,
'original' plots in the order of ch_names, array gives the
indices to use in plotting.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
The title of the window. If None, either the filename of the
raw object or '<unknown>' will be displayed as the title.
show : bool
Show figures if True
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly (click on line).
May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
filtorder : int
Filtering order. Note that for efficiency and simplicity,
filtering during plotting uses forward-backward IIR filtering,
so the effective filter order will be twice ``filtorder``.
Filtering the lines for display may also produce some edge
artifacts (at the left and right edges) of the signals
during display. Filtering requires scipy >= 0.10.
clipping : str | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Raw traces.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
The scaling can be adjusted with - and + (or =) keys. The viewport
dimensions can be adjusted with page up/page down and home/end keys.
Full screen mode can be toggled with the f11 key. To mark or un-mark a
channel as bad, click on the rather flat segments of a channel's time
series. The changes will be reflected immediately in the raw object's
``raw.info['bads']`` entry.
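Examples
--------
A minimal usage sketch, assuming a loaded Raw instance ``raw``; the
argument values are illustrative only::
>>> raw.plot(duration=20.0, n_channels=30, block=True)  # doctest: +SKIP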
"""
return plot_raw(self, events, duration, start, n_channels, bgcolor,
color, bad_color, event_color, scalings, remove_dc,
order, show_options, title, show, block, highpass,
lowpass, filtorder, clipping)
@verbose
def plot_psd(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
proj=False, n_fft=2048, picks=None, ax=None,
color='black', area_mode='std', area_alpha=0.33,
n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
"""Plot the power spectral density across channels
Parameters
----------
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
picks : array-like of int | None
List of channels to use. Cannot be None if `ax` is supplied. If
both `picks` and `ax` are None, separate subplots will be created
for each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
How to plot area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels)
will be plotted. Bad channels will be excluded from these
calculations. If None, no area will be plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
dB : bool
If True, transform data to decibels.
show : bool
Call pyplot.show() at the end.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure with frequency spectra of the data channels.
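Examples
--------
A minimal usage sketch with illustrative time and frequency limits::
>>> fig = raw.plot_psd(tmax=60.0, fmin=1.0, fmax=40.0)  # doctest: +SKIP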
"""
return plot_raw_psd(self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
proj=proj, n_fft=n_fft, picks=picks, ax=ax,
color=color, area_mode=area_mode,
area_alpha=area_alpha, n_overlap=n_overlap,
dB=dB, show=show, n_jobs=n_jobs)
def plot_psd_topo(self, tmin=0., tmax=None, fmin=0, fmax=100, proj=False,
n_fft=2048, n_overlap=0, layout=None, color='w',
fig_facecolor='k', axis_facecolor='k', dB=True,
show=True, n_jobs=1, verbose=None):
"""Function for plotting channel wise frequency spectra as topography.
Parameters
----------
tmin : float
Start time for calculations. Defaults to zero.
tmax : float | None
End time for calculations. If None (default), the end of data is
used.
fmin : float
Start frequency to consider. Defaults to zero.
fmax : float
End frequency to consider. Defaults to 100.
proj : bool
Apply projection. Defaults to False.
n_fft : int
Number of points to use in Welch FFT calculations. Defaults to
2048.
n_overlap : int
The number of points of overlap between blocks. Defaults to 0
(no overlap).
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If None (default), the correct
layout is inferred from the data.
color : str | tuple
A matplotlib-compatible color to use for the curves. Defaults to
white.
fig_facecolor : str | tuple
A matplotlib-compatible color to use for the figure background.
Defaults to black.
axis_facecolor : str | tuple
A matplotlib-compatible color to use for the axis background.
Defaults to black.
dB : bool
If True, transform data to decibels. Defaults to True.
show : bool
Show figure if True. Defaults to True.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
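Examples
--------
A minimal usage sketch with illustrative values::
>>> fig = raw.plot_psd_topo(fmax=50.0, dB=True)  # doctest: +SKIP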
"""
return plot_raw_psd_topo(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=proj, n_fft=n_fft,
n_overlap=n_overlap, layout=layout,
color=color, fig_facecolor=fig_facecolor,
axis_facecolor=axis_facecolor, dB=dB,
show=show, n_jobs=n_jobs, verbose=verbose)
def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
return_singular=False, picks=None, scalings='norm'):
"""Estimate rank of the raw data
This function is meant to provide a reasonable estimate of the rank.
The true rank of the data depends on many factors, so use at your
own risk.
Parameters
----------
tstart : float
Start time to use for rank estimation. Default is 0.0.
tstop : float | None
End time to use for rank estimation. Default is 30.0.
If None, the end time of the raw file is used.
tol : float
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular values around one.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
picks : array_like of int, shape (n_selected_channels,)
The channels to be considered for rank estimation.
If None (default), MEG and EEG channels are included.
scalings : dict | 'norm'
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will update the
following dict of defaults:
dict(mag=1e11, grad=1e9, eeg=1e5)
If 'norm' data will be scaled by internally computed
channel-wise norms.
Defaults to 'norm'.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
Notes
-----
If data are not pre-loaded, the appropriate data will be loaded
by this function (can be memory intensive).
Projectors are not taken into account unless they have been applied
to the data using apply_proj(), since it is not always possible
to tell whether or not projectors have been applied previously.
Bad channels will be excluded from calculations.
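Examples
--------
A minimal usage sketch using the first 30 seconds of data::
>>> rank = raw.estimate_rank(tstart=0.0, tstop=30.0)  # doctest: +SKIP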
"""
from ..cov import _estimate_rank_meeg_signals
start = max(0, self.time_as_index(tstart)[0])
if tstop is None:
stop = self.n_times - 1
else:
stop = min(self.n_times - 1, self.time_as_index(tstop)[0])
tslice = slice(start, stop + 1)
if picks is None:
picks = _pick_data_channels(self.info, exclude='bads',
with_ref_meg=False)
# ensure we don't get a view of data
if len(picks) == 1:
return 1.0, 1.0
# this should already be a copy, so we can overwrite it
data = self[picks, tslice][0]
out = _estimate_rank_meeg_signals(
data, pick_info(self.info, picks),
scalings=scalings, tol=tol, return_singular=return_singular)
return out
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
@property
def times(self):
"""Time points"""
return self._times
@property
def n_times(self):
"""Number of time points"""
return self.last_samp - self.first_samp + 1
def __len__(self):
"""The number of time points
Returns
-------
len : int
The number of time points.
Examples
--------
This can be used as::
>>> len(raw) # doctest: +SKIP
1000
"""
return self.n_times
def load_bad_channels(self, bad_file=None, force=False):
"""
Mark channels as bad from a text file
This function operates mostly in the style of the C function
``mne_mark_bad_channels``.
Parameters
----------
bad_file : string
File name of the text file containing bad channels
If bad_file is None, bad channels are cleared, but this
is more easily done directly as raw.info['bads'] = [].
force : boolean
Whether or not to force bad channel marking (of those
that exist) if channels are not found, instead of
raising an error.
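Examples
--------
A minimal usage sketch; ``bads.txt`` is a hypothetical text file
containing one channel name per line::
>>> raw.load_bad_channels('bads.txt')  # doctest: +SKIP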
"""
if bad_file is not None:
# Check to make sure bad channels are there
names = frozenset(self.info['ch_names'])
with open(bad_file) as fid:
bad_names = [l for l in fid.read().splitlines() if l]
names_there = [ci for ci in bad_names if ci in names]
count_diff = len(bad_names) - len(names_there)
if count_diff > 0:
if not force:
raise ValueError('Bad channels from:\n%s\n not found '
'in:\n%s' % (bad_file,
self._filenames[0]))
else:
warn('%d bad channels from:\n%s\nnot found in:\n%s'
% (count_diff, bad_file, self._filenames[0]))
self.info['bads'] = names_there
else:
self.info['bads'] = []
def append(self, raws, preload=None):
"""Concatenate raw instances as if they were continuous
Parameters
----------
raws : list, or Raw instance
list of Raw instances to concatenate to the current instance
(in order), or a single raw instance to concatenate.
preload : bool, str, or None (default None)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory). If preload is
None, preload=True or False is inferred using the preload status
of the raw files passed in.
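Examples
--------
A minimal usage sketch; ``raw2`` is a hypothetical second Raw instance
compatible with ``raw``::
>>> raw.append(raw2, preload=True)  # doctest: +SKIP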
"""
if not isinstance(raws, list):
raws = [raws]
# make sure the raws are compatible
all_raws = [self]
all_raws += raws
_check_raw_compatibility(all_raws)
# deal with preloading data first (while files are separate)
all_preloaded = self.preload and all(r.preload for r in raws)
if preload is None:
if all_preloaded:
preload = True
else:
preload = False
if preload is False:
if self.preload:
self._data = None
self.preload = False
else:
# do the concatenation ourselves since preload might be a string
nchan = self.info['nchan']
c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])
nsamp = c_ns[-1]
if not self.preload:
this_data = self._read_segment()
else:
this_data = self._data
# allocate the buffer
if isinstance(preload, string_types):
_data = np.memmap(preload, mode='w+', dtype=this_data.dtype,
shape=(nchan, nsamp))
else:
_data = np.empty((nchan, nsamp), dtype=this_data.dtype)
_data[:, 0:c_ns[0]] = this_data
for ri in range(len(raws)):
if not raws[ri].preload:
# read the data directly into the buffer
data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]
raws[ri]._read_segment(data_buffer=data_buffer)
else:
_data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data
self._data = _data
self.preload = True
# now combine information from each raw file to construct new self
for r in raws:
self._first_samps = np.r_[self._first_samps, r._first_samps]
self._last_samps = np.r_[self._last_samps, r._last_samps]
self._raw_extras += r._raw_extras
self._filenames += r._filenames
self.annotations = _combine_annotations((self.annotations,
r.annotations),
self._last_samps,
self._first_samps,
self.info['sfreq'])
self._update_times()
if not (len(self._first_samps) == len(self._last_samps) ==
len(self._raw_extras) == len(self._filenames)):
raise RuntimeError('Append error') # should never happen
def close(self):
"""Clean up the object.
Does nothing for objects that close their file descriptors.
Things like RawFIF will override this method.
"""
pass
def copy(self):
""" Return copy of Raw instance
"""
return deepcopy(self)
def __repr__(self):
name = self._filenames[0]
name = 'None' if name is None else op.basename(name)
s = ('%s, n_channels x n_times : %s x %s (%0.1f sec)'
% (name, len(self.ch_names), self.n_times, self.times[-1]))
return "<%s | %s>" % (self.__class__.__name__, s)
def add_events(self, events, stim_channel=None):
"""Add events to stim channel
Parameters
----------
events : ndarray, shape (n_events, 3)
Events to add. The first column specifies the sample number of
each event, the second column is ignored, and the third column
provides the event value. If events already exist in the Raw
instance at the given sample numbers, the event values will be
added together.
stim_channel : str | None
Name of the stim channel to add to. If None, the config variable
'MNE_STIM_CHANNEL' is used. If this is not found, it will default
to 'STI 014'.
Notes
-----
Data must be preloaded in order to add events.
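Examples
--------
A minimal sketch adding a single event with value 5 at an arbitrary
sample inside the recording (the default stim channel name is used)::
>>> events = np.array([[raw.first_samp + 100, 0, 5]])  # doctest: +SKIP
>>> raw.add_events(events, stim_channel='STI 014')  # doctest: +SKIP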
"""
if not self.preload:
raise RuntimeError('cannot add events unless data are preloaded')
events = np.asarray(events)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError('events must be shape (n_events, 3)')
stim_channel = _get_stim_channel(stim_channel, self.info)
pick = pick_channels(self.ch_names, stim_channel)
if len(pick) == 0:
raise ValueError('Channel %s not found' % stim_channel)
pick = pick[0]
idx = events[:, 0].astype(int)
if np.any(idx < self.first_samp) or np.any(idx > self.last_samp):
raise ValueError('event sample numbers must be between %s and %s'
% (self.first_samp, self.last_samp))
if not all(idx == events[:, 0]):
raise ValueError('event sample numbers must be integers')
self._data[pick, idx - self.first_samp] += events[:, 2]
def _get_buffer_size(self, buffer_size_sec=None):
"""Helper to get the buffer size"""
if buffer_size_sec is None:
if 'buffer_size_sec' in self.info:
buffer_size_sec = self.info['buffer_size_sec']
else:
buffer_size_sec = 10.0
return int(np.ceil(buffer_size_sec * self.info['sfreq']))
def _check_preload(raw, msg):
"""Helper to ensure data are preloaded"""
if not raw.preload:
raise RuntimeError(msg + ' requires raw data to be loaded. Use '
'preload=True (or string) in the constructor or '
'raw.load_data().')
def _allocate_data(data, data_buffer, data_shape, dtype):
"""Helper to data in memory or in memmap for preloading"""
if data is None:
# if not already done, allocate array with right type
if isinstance(data_buffer, string_types):
# use a memmap
data = np.memmap(data_buffer, mode='w+',
dtype=dtype, shape=data_shape)
else:
data = np.zeros(data_shape, dtype=dtype)
return data
def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
"""Convert indices to time
Parameters
----------
index : list-like | int
List of ints or int representing points in time.
sfreq : float
Sampling frequency of the recording in Hz.
first_samp : int
Index of the first sample; added to ``index`` when ``use_first_samp``
is True.
use_first_samp : boolean
If True, the time returned is relative to the session onset, else
relative to the recording onset.
Returns
-------
times : ndarray
Times corresponding to the index supplied.
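Examples
--------
A minimal sketch; with a 1000 Hz sampling rate, sample 500 corresponds
to 0.5 s::
>>> _index_as_time([0, 500], sfreq=1000.)  # doctest: +SKIP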
"""
times = np.atleast_1d(index) + (first_samp if use_first_samp else 0)
return times / sfreq
class _RawShell():
"""Used for creating a temporary raw object"""
def __init__(self):
self.first_samp = None
self.last_samp = None
self._cals = None
self._rawdir = None
self._projector = None
@property
def n_times(self):
return self.last_samp - self.first_samp + 1
###############################################################################
# Writing
def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start,
stop, buffer_size, projector, inv_comp, drop_small_buffer,
split_size, part_idx, prev_fname):
"""Write raw file with splitting
"""
# we've done something wrong if we hit this
n_times_max = len(raw.times)
if start >= stop or stop > n_times_max:
raise RuntimeError('Cannot write raw file with no data: %s -> %s '
'(max: %s) requested' % (start, stop, n_times_max))
if part_idx > 0:
# insert index in filename
base, ext = op.splitext(fname)
use_fname = '%s-%d%s' % (base, part_idx, ext)
else:
use_fname = fname
logger.info('Writing %s' % use_fname)
fid, cals = _start_writing_raw(use_fname, info, picks, data_type,
reset_range, raw.annotations)
use_picks = slice(None) if picks is None else picks
first_samp = raw.first_samp + start
if first_samp != 0:
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
# previous file name and id
if part_idx > 0 and prev_fname is not None:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id'])
write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)
end_block(fid, FIFF.FIFFB_REF)
pos_prev = fid.tell()
if pos_prev > split_size:
raise ValueError('file is larger than "split_size" after writing '
'measurement information, you must use a larger '
'value for split size: %s plus enough bytes for '
'the chosen buffer_size' % pos_prev)
next_file_buffer = 2 ** 20 # extra cushion for last few post-data tags
for first in range(start, stop, buffer_size):
# Write blocks <= buffer_size in size
last = min(first + buffer_size, stop)
data, times = raw[use_picks, first:last]
assert len(times) == last - first
if projector is not None:
data = np.dot(projector, data)
if ((drop_small_buffer and (first > start) and
(len(times) < buffer_size))):
logger.info('Skipping data chunk due to small buffer ... '
'[done]')
break
logger.info('Writing ...')
_write_raw_buffer(fid, data, cals, fmt, inv_comp)
pos = fid.tell()
this_buff_size_bytes = pos - pos_prev
overage = pos - split_size + next_file_buffer
if overage > 0:
# This should occur on the first buffer write of the file, so
# we should mention the space required for the meas info
raise ValueError(
'buffer size (%s) is too large for the given split size (%s) '
'by %s bytes after writing info (%s) and leaving enough space '
'for end tags (%s): decrease "buffer_size_sec" or increase '
'"split_size".' % (this_buff_size_bytes, split_size, overage,
pos_prev, next_file_buffer))
# Split files if necessary, leave some space for next file info
# make sure we actually *need* another buffer (the "and" check below)
if pos >= split_size - this_buff_size_bytes - next_file_buffer and \
first + buffer_size < stop:
next_fname, next_idx = _write_raw(
fname, raw, info, picks, fmt,
data_type, reset_range, first + buffer_size, stop, buffer_size,
projector, inv_comp, drop_small_buffer, split_size,
part_idx + 1, use_fname)
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id'])
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
break
pos_prev = pos
logger.info('Closing %s [done]' % use_fname)
if info.get('maxshield', False):
end_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
else:
end_block(fid, FIFF.FIFFB_RAW_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
return use_fname, part_idx
def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
reset_range=True, annotations=None):
"""Start write raw data in file
Data will be written in float
Parameters
----------
name : string
Name of the file to create.
info : dict
Measurement info.
sel : array of int, optional
Indices of channels to include. By default all channels are included.
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data.
reset_range : bool
If True, the info['chs'][k]['range'] parameter will be set to unity.
annotations : instance of Annotations or None
The annotations to write.
Returns
-------
fid : file
The file descriptor.
cals : list
calibration factors.
"""
#
# Measurement info
#
info = pick_info(info, sel)
#
# Create the file and save the essentials
#
fid = start_file(name)
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
cals = []
for k in range(info['nchan']):
#
# Scan numbers may have been messed up
#
info['chs'][k]['scanno'] = k + 1 # scanno starts at 1 in FIF format
if reset_range is True:
info['chs'][k]['range'] = 1.0
cals.append(info['chs'][k]['cal'] * info['chs'][k]['range'])
write_meas_info(fid, info, data_type=data_type, reset_range=reset_range)
#
# Annotations
#
if annotations is not None:
start_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, annotations.onset)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX,
annotations.duration + annotations.onset)
# To allow ':' in descriptions, it is replaced with ';' for serialization
write_name_list(fid, FIFF.FIFF_COMMENT, [d.replace(':', ';') for d in
annotations.description])
if annotations.orig_time is not None:
write_double(fid, FIFF.FIFF_MEAS_DATE, annotations.orig_time)
end_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS)
#
# Start the raw data
#
if info.get('maxshield', False):
start_block(fid, FIFF.FIFFB_SMSH_RAW_DATA)
else:
start_block(fid, FIFF.FIFFB_RAW_DATA)
return fid, cals
def _write_raw_buffer(fid, buf, cals, fmt, inv_comp):
"""Write raw buffer
Parameters
----------
fid : file descriptor
an open raw data file.
buf : array
The buffer to write.
cals : array
Calibration factors.
fmt : str
'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
float for each item. This will be doubled for complex datatypes. Note
that short and int formats cannot be used for complex data.
inv_comp : array | None
The CTF compensation matrix used to revert compensation
change when reading.
"""
if buf.shape[0] != len(cals):
raise ValueError('buffer and calibration sizes do not match')
if fmt not in ['short', 'int', 'single', 'double']:
raise ValueError('fmt must be "short", "int", "single", or "double"')
if np.isrealobj(buf):
if fmt == 'short':
write_function = write_dau_pack16
elif fmt == 'int':
write_function = write_int
elif fmt == 'single':
write_function = write_float
else:
write_function = write_double
else:
if fmt == 'single':
write_function = write_complex64
elif fmt == 'double':
write_function = write_complex128
else:
raise ValueError('only "single" and "double" supported for '
'writing complex data')
if inv_comp is not None:
buf = np.dot(inv_comp / np.ravel(cals)[:, None], buf)
else:
buf = buf / np.ravel(cals)[:, None]
write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
def _my_hilbert(x, n_fft=None, envelope=False):
""" Compute Hilbert transform of signals w/ zero padding.
Parameters
----------
x : array, shape (n_times)
The signal to convert
n_fft : int, length > x.shape[-1] | None
How much to pad the signal before Hilbert transform.
Note that signal will then be cut back to original length.
envelope : bool
Whether to compute amplitude of the hilbert transform in order
to return the signal envelope.
Returns
-------
out : array, shape (n_times)
The Hilbert transform of the signal, or its envelope.
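Examples
--------
A minimal sketch computing the envelope of a random 1000-sample
signal (values are illustrative)::
>>> env = _my_hilbert(np.random.randn(1000), n_fft=1024, envelope=True)  # doctest: +SKIP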
"""
from scipy.signal import hilbert
n_fft = x.shape[-1] if n_fft is None else n_fft
n_x = x.shape[-1]
out = hilbert(x, N=n_fft)[:n_x]
if envelope is True:
out = np.abs(out)
return out
def _check_raw_compatibility(raw):
"""Check to make sure all instances of Raw
in the input list raw have compatible parameters"""
for ri in range(1, len(raw)):
if not isinstance(raw[ri], type(raw[0])):
raise ValueError('raw[%d] type must match' % ri)
if not raw[ri].info['nchan'] == raw[0].info['nchan']:
raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
if not raw[ri].info['bads'] == raw[0].info['bads']:
raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
if not all(raw[ri]._cals == raw[0]._cals):
raise ValueError('raw[%d]._cals must match' % ri)
if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
raise ValueError('SSP projectors in raw files must be the same')
if not all(_proj_equal(p1, p2) for p1, p2 in
zip(raw[0].info['projs'], raw[ri].info['projs'])):
raise ValueError('SSP projectors in raw files must be the same')
if not all(r.orig_format == raw[0].orig_format for r in raw):
warn('raw files do not all have the same data format, could result in '
'precision mismatch. Setting raw.orig_format="unknown"')
raw[0].orig_format = 'unknown'
def concatenate_raws(raws, preload=None, events_list=None):
"""Concatenate raw instances as if they were continuous. Note that raws[0]
is modified in-place to achieve the concatenation.
Parameters
----------
raws : list
list of Raw instances to concatenate (in order).
preload : bool, or None
If None, preload status is inferred using the preload status of the
raw files passed in. True or False sets the resulting raw file to
have or not have data preloaded.
events_list : None | list
The events to concatenate. Defaults to None.
Returns
-------
raw : instance of Raw
The result of the concatenation (first Raw instance passed in).
events : ndarray of int, shape (n_events, 3)
The events. Only returned if `events_list` is not None.
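Examples
--------
A minimal usage sketch; ``raw1`` and ``raw2`` are hypothetical
compatible Raw instances::
>>> raw = concatenate_raws([raw1, raw2], preload=True)  # doctest: +SKIP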
"""
if events_list is not None:
if len(events_list) != len(raws):
raise ValueError('`raws` and `events_list` are required '
'to be of the same length')
first, last = zip(*[(r.first_samp, r.last_samp) for r in raws])
events = concatenate_events(events_list, first, last)
raws[0].append(raws[1:], preload)
if events_list is None:
return raws[0]
else:
return raws[0], events
def _check_update_montage(info, montage, path=None, update_ch_names=False):
""" Helper function for eeg readers to add montage"""
if montage is not None:
if not isinstance(montage, (string_types, Montage)):
err = ("Montage must be str, None, or instance of Montage. "
"%s was provided" % type(montage))
raise TypeError(err)
if montage is not None:
if isinstance(montage, string_types):
montage = read_montage(montage, path=path)
_set_montage(info, montage, update_ch_names=update_ch_names)
missing_positions = []
exclude = (FIFF.FIFFV_EOG_CH, FIFF.FIFFV_MISC_CH,
FIFF.FIFFV_STIM_CH)
for ch in info['chs']:
if not ch['kind'] in exclude:
if np.unique(ch['loc']).size == 1:
missing_positions.append(ch['ch_name'])
# raise error if positions are missing
if missing_positions:
raise KeyError(
"The following positions are missing from the montage "
"definitions: %s. If those channels lack positions "
"because they are EOG channels use the eog parameter."
% str(missing_positions))
| bsd-3-clause |
fredRos/pypmc | examples/markov_chain.py | 1 | 1798 | '''This example illustrates how to run a Markov Chain using pypmc'''
import numpy as np
import pypmc
# define a proposal
prop_dof = 1.
prop_sigma = np.array([[0.1 , 0. ]
,[0. , 0.02]])
prop = pypmc.density.student_t.LocalStudentT(prop_sigma, prop_dof)
# define the target; i.e., the function you want to sample from.
# In this case, it is a Gaussian with mean "target_mean" and
# covariance "target_sigma".
#
# Note that the target function "log_target" returns the log of the
# unnormalized gaussian density.
target_sigma = np.array([[0.01 , 0.003 ]
,[0.003, 0.0025]])
inv_target_sigma = np.linalg.inv(target_sigma)
target_mean = np.array([4.3, 1.1])
def unnormalized_log_pdf_gauss(x, mu, inv_sigma):
diff = x - mu
return -0.5 * diff.dot(inv_sigma).dot(diff)
log_target = lambda x: unnormalized_log_pdf_gauss(x, target_mean, inv_target_sigma)
# choose a bad initialization
start = np.array([-2., 10.])
# define the markov chain object
mc = pypmc.sampler.markov_chain.AdaptiveMarkovChain(log_target, prop, start)
# run burn-in
mc.run(10**4)
# delete burn-in from samples
mc.clear()
# run 100,000 steps adapting the proposal every 500 steps
# saving the accept count that is returned by mc.run
accept_count = 0
for i in range(200):
accept_count += mc.run(500)
mc.adapt()
# extract a reference to the history of all visited points
values = mc.samples[:]
accept_rate = float(accept_count) / len(values)
print("The chain accepted %4.2f%% of the proposed points" % (accept_rate * 100) )
# plot the result
try:
import matplotlib.pyplot as plt
except ImportError:
print('For plotting "matplotlib" needs to be installed')
exit(1)
plt.hexbin(values[:,0], values[:,1], gridsize = 40, cmap='gray_r')
plt.show()
| gpl-2.0 |
procoder317/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
tomlof/scikit-learn | sklearn/utils/setup.py | 77 | 2993 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.pyx'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.pyx'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension('murmurhash',
sources=['murmurhash.pyx', join(
'src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.pyx', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.pyx'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
Tong-Chen/scikit-learn | examples/exercises/plot_cv_digits.py | 7 | 1177 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import pylab as pl
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.semilogx(C_s, scores)
pl.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
pl.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = pl.yticks()
pl.yticks(locs, map(lambda x: "%g" % x, locs))
pl.ylabel('CV score')
pl.xlabel('Parameter C')
pl.ylim(0, 1.1)
pl.show()
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/utils/tests/test_multiclass.py | 6 | 13417 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
mrawls/APO-1m-phot | compare_tgd.py | 1 | 8531 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
'''
Compare multiple measurements of Teff, logg, and distance for RG/EBs.
'''
dfile = 'RGEB_distinfo.txt'
tgfile = '../../RGEB_tefflogg.txt'
colors = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00']
# distance file column info
KICcol1 = 0
distcol = 7; disterrcol = 8
DR12distcol = 9; DR12disterrcol = 10
# teff & logg file column info
KICcol2 = 0
MOOGtcol = 1; MOOGterrcol = 2
MOOGgcol = 3; MOOGgerrcol = 4
MOOGfecol = 5; MOOGfeerrcol = 6
DR12tcol = 7; DR12terrcol = 8
DR12gcol = 9; DR12gerrcol = 10
DR12fecol = 11; DR12feerrcol = 12
Cannontcol = 13; Cannonterrcol = 14
Cannongcol = 15; Cannongerrcol = 16
Cannonfecol = 17; Cannonfeerrcol = 18
KICtcol = 19; KICterrcol = 20
KICgcol = 21; KICgerrcol = 22
KICfecol = 23; KICfeerrcol = 24
MESAtcol = 25; MESAterrcol = 26
MESAgcol = 27; MESAgerrcol = 28
# logg column info only
ELCgcol = 29; ELCgerrcol = 30
Seismicgcol = 31; Seismicgerrcol = 32
# we'll use KICcol1 as the master target list because it's numerically sorted
KICs = np.loadtxt(dfile, comments='#', usecols=(0,), unpack=True)
KICtgs = np.loadtxt(tgfile, comments='#', usecols=(0,), unpack=True)
# create x axis label string list
strKICs = [' ',]
for KIC in KICs:
strKICs.append(str(int(KIC)))
strKICs.append(' ')
xaxis = np.arange(0,len(KICs))
# load the rest of the data (distances)
dists = np.loadtxt(dfile, comments='#', usecols=(distcol, disterrcol), unpack=True)
DR12ds = np.loadtxt(dfile, comments='#', usecols=(DR12distcol, DR12disterrcol), unpack=True)
# load the rest of the data (teffs, loggs)
MOOGts = np.loadtxt(tgfile, comments='#', usecols=(MOOGtcol, MOOGterrcol), unpack=True)
MOOGgs = np.loadtxt(tgfile, comments='#', usecols=(MOOGgcol, MOOGgerrcol), unpack=True)
MOOGfes = np.loadtxt(tgfile, comments='#', usecols=(MOOGfecol, MOOGfeerrcol), unpack=True)
DR12ts = np.loadtxt(tgfile, comments='#', usecols=(DR12tcol, DR12terrcol), unpack=True)
DR12gs = np.loadtxt(tgfile, comments='#', usecols=(DR12gcol, DR12gerrcol), unpack=True)
DR12fes = np.loadtxt(tgfile, comments='#', usecols=(DR12fecol, DR12feerrcol), unpack=True)
Cannonts = np.loadtxt(tgfile, comments='#', usecols=(Cannontcol, Cannonterrcol), unpack=True)
Cannongs = np.loadtxt(tgfile, comments='#', usecols=(Cannongcol, Cannongerrcol), unpack=True)
Cannonfes = np.loadtxt(tgfile, comments='#', usecols=(Cannonfecol, Cannonfeerrcol), unpack=True)
KICts = np.loadtxt(tgfile, comments='#', usecols=(KICtcol, KICterrcol), unpack=True)
KICgs = np.loadtxt(tgfile, comments='#', usecols=(KICgcol, KICgerrcol), unpack=True)
KICfes = np.loadtxt(tgfile, comments='#', usecols=(KICfecol, KICfeerrcol), unpack=True)
MESAts = np.loadtxt(tgfile, comments='#', usecols=(MESAtcol, MESAterrcol), unpack=True)
MESAgs = np.loadtxt(tgfile, comments='#', usecols=(MESAgcol, MESAgerrcol), unpack=True)
ELCgs = np.loadtxt(tgfile, comments='#', usecols=(ELCgcol, ELCgerrcol), unpack=True)
#Seismicgs = np.loadtxt(tgfile, comments='#', usecols=(Seismicgcol, Seismicgerrcol), unpack=True)
# sort all the teff and logg arrays so they align with xaxis
MOOGts[0] = MOOGts[0][np.argsort(KICtgs)]; MOOGgs[0] = MOOGgs[0][np.argsort(KICtgs)]
MOOGts[1] = MOOGts[1][np.argsort(KICtgs)]; MOOGgs[1] = MOOGgs[1][np.argsort(KICtgs)]
DR12ts[0] = DR12ts[0][np.argsort(KICtgs)]; DR12gs[0] = DR12gs[0][np.argsort(KICtgs)]
DR12ts[1] = DR12ts[1][np.argsort(KICtgs)]; DR12gs[1] = DR12gs[1][np.argsort(KICtgs)]
Cannonts[0] = Cannonts[0][np.argsort(KICtgs)]; Cannongs[0] = Cannongs[0][np.argsort(KICtgs)]
Cannonts[1] = Cannonts[1][np.argsort(KICtgs)]; Cannongs[1] = Cannongs[1][np.argsort(KICtgs)]
KICts[0] = KICts[0][np.argsort(KICtgs)]; KICgs[0] = KICgs[0][np.argsort(KICtgs)]
KICts[1] = KICts[1][np.argsort(KICtgs)]; KICgs[1] = KICgs[1][np.argsort(KICtgs)]
MESAts[0] = MESAts[0][np.argsort(KICtgs)]; MESAgs[0] = MESAgs[0][np.argsort(KICtgs)]
MESAts[1] = MESAts[1][np.argsort(KICtgs)]; MESAgs[1] = MESAgs[1][np.argsort(KICtgs)]
ELCgs[0] = ELCgs[0][np.argsort(KICtgs)]
ELCgs[1] = ELCgs[1][np.argsort(KICtgs)]
#Seismicgs[0] = Seismicgs[0][np.argsort(KICtgs)]
#Seismicgs[1] = Seismicgs[1][np.argsort(KICtgs)]
MOOGfes[0] = MOOGfes[0][np.argsort(KICtgs)]; MOOGfes[1] = MOOGfes[1][np.argsort(KICtgs)]
DR12fes[0] = DR12fes[0][np.argsort(KICtgs)]; DR12fes[1] = DR12fes[1][np.argsort(KICtgs)]
Cannonfes[0] = Cannonfes[0][np.argsort(KICtgs)]; Cannonfes[1] = Cannonfes[1][np.argsort(KICtgs)]
KICfes[0] = KICfes[0][np.argsort(KICtgs)]; KICfes[1] = KICfes[1][np.argsort(KICtgs)]
# WHEW THAT WAS FUN
KICtgs = KICtgs[np.argsort(KICtgs)]
# set up figures
fig = plt.figure(1, figsize=(15,10))
plt.subplots_adjust(hspace=0)
# top panel - TEMPERATURES
ax1 = plt.subplot(4,1,1)
ax1.set_ylabel(r'$T_{\textrm{eff}}$ (K)', size=26)
ax1.set_xlim([-0.5,len(xaxis)-0.5])
ax1.set_ylim([4405, 5200])
ax1.set_xticklabels(strKICs, size=22)
ax1.xaxis.tick_top()
plt.errorbar(xaxis-0.1, DR12ts[0], yerr=DR12ts[1], ls='None', marker='o', ms=10, c=colors[1], label='DR12')
plt.errorbar(xaxis-0.05, Cannonts[0], yerr=Cannonts[1], ls='None', marker='s', ms=10, c=colors[2], label='Cannon')
plt.errorbar(xaxis+0.05, KICts[0], yerr=KICts[1], ls='None', marker='^', ms=10, c=colors[3], label='KIC')
plt.errorbar(xaxis+0.1, MESAts[0], ls='None', marker='h', ms=12, c=colors[4], label='MESA')
plt.errorbar(xaxis, MOOGts[0], yerr=MOOGts[1], ls='None', marker='D', ms=10, c=colors[0], label='MOOG')
plt.axvspan(0.5,1.5, alpha=0.1, color='k')
plt.axvspan(2.5,3.5, alpha=0.1, color='k')
plt.axvspan(4.5,5.5, alpha=0.1, color='k')
#leg1 = ax1.legend(bbox_to_anchor=(1.1,0.68), numpoints=1, loc=1, borderaxespad=0.,
# frameon=True, handletextpad=0.2, prop={'size':18})
#leg1.get_frame().set_linewidth(0.0)
leg1 = ax1.legend(bbox_to_anchor=(1.1,0.90), numpoints=1, loc=1, borderaxespad=0.,
frameon=True, handletextpad=0.2, prop={'size':18})
leg1.get_frame().set_linewidth(0.0)
# middle-top panel - LOGGS
ax2 = plt.subplot(4,1,2)
ax2.set_ylabel(r'$\log g$ (cgs)', size=26)
ax2.set_xlim([-0.5,len(xaxis)-0.5])
ax2.set_ylim([1.8, 3.7])
ax2.set_xticklabels([])
plt.errorbar(xaxis-0.1, DR12gs[0], yerr=DR12gs[1], ls='None', marker='o', ms=10, c=colors[1])
plt.errorbar(xaxis-0.05, Cannongs[0], yerr=Cannongs[1], ls='None', marker='s', ms=10, c=colors[2])
plt.errorbar(xaxis+0.05, KICgs[0], yerr=KICgs[1], ls='None', marker='^', ms=10, c=colors[3])
plt.errorbar(xaxis+0.1, MESAgs[0], ls='None', marker='h', ms=12, c=colors[4])
plt.errorbar(xaxis, MOOGgs[0], yerr=MOOGgs[1], ls='None', marker='D', ms=10, c=colors[0])
plt.errorbar(xaxis, ELCgs[0], yerr=ELCgs[1], ls='None', marker='*', ms=20, c=colors[0], label='This work')
#plt.errorbar(xaxis, Seismicgs[0], yerr=Seismicgs[1], ls='None', marker='*', ms=20, c='k', label='Seismic')
plt.axvspan(0.5,1.5, alpha=0.1, color='k')
plt.axvspan(2.5,3.5, alpha=0.1, color='k')
plt.axvspan(4.5,5.5, alpha=0.1, color='k')
#leg2 = ax2.legend(bbox_to_anchor=(1.12,1.01), numpoints=1, loc=1, borderaxespad=0.,
# frameon=True, handletextpad=0.2, prop={'size':18})
#leg2.get_frame().set_linewidth(0.0)
leg2 = ax2.legend(bbox_to_anchor=(1.12,1.005), numpoints=1, loc=1, borderaxespad=0.,
frameon=True, handletextpad=0.2, prop={'size':18})
leg2.get_frame().set_linewidth(0.0)
# middle-bottom panel - METALLICITIES
ax3 = plt.subplot(4,1,3)
ax3.set_ylabel(r'[Fe/H]', size=26)
ax3.set_xlim([-0.5,len(xaxis)-0.5])
ax3.set_ylim([-0.9, 0.55])
ax3.set_xticklabels([])
plt.errorbar(xaxis-0.1, DR12fes[0], yerr=DR12fes[1], ls='None', marker='o', ms=10, c=colors[1])
plt.errorbar(xaxis-0.05, Cannonfes[0], yerr=Cannonfes[1], ls='None', marker='s', ms=10, c=colors[2])
plt.errorbar(xaxis+0.05, KICfes[0], yerr=KICfes[1], ls='None', marker='^', ms=10, c=colors[3])
plt.errorbar(xaxis, MOOGfes[0], yerr=MOOGfes[1], ls='None', marker='D', ms=10, c=colors[0])
plt.axvspan(0.5,1.5, alpha=0.1, color='k')
plt.axvspan(2.5,3.5, alpha=0.1, color='k')
plt.axvspan(4.5,5.5, alpha=0.1, color='k')
# bottom panel - DISTANCES
ax4 = plt.subplot(4,1,4)
ax4.set_ylabel(r'$d$ (kpc)', size=26)
ax4.set_xlim([-0.5,len(xaxis)-0.5])
ax4.set_ylim([0.5, 3.9])
ax4.set_xticklabels(strKICs, size=22)
plt.errorbar(xaxis-0.1, DR12ds[0]/1000, yerr=DR12ds[1]/1000, ls='None', marker='o', ms=10, c=colors[1])
plt.errorbar(xaxis, dists[0]/1000, yerr=dists[1]/1000, ls='None', marker='*', ms=20, c=colors[0])
plt.axvspan(0.5,1.5, alpha=0.1, color='k')
plt.axvspan(2.5,3.5, alpha=0.1, color='k')
plt.axvspan(4.5,5.5, alpha=0.1, color='k')
plt.show() | mit |
deepesch/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
quheng/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
It does two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10  # integer division: make_regression expects an int
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/extension/test_categorical.py | 2 | 7432 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import string
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical
from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
import pandas.util.testing as tm
def make_data():
while True:
values = np.random.choice(list(string.ascii_letters), size=100)
# ensure we meet the requirements
# 1. first two not null
# 2. first and second are different
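        # (values drawn from ascii_letters are never null, so only requirement 2
        #  needs an explicit check)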
if values[0] != values[1]:
break
return values
@pytest.fixture
def dtype():
return CategoricalDtype()
@pytest.fixture
def data():
"""Length-100 array for this type.
    * data[0] and data[1] should both be non-missing
    * data[0] and data[1] should not be equal
"""
return Categorical(make_data())
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return Categorical([np.nan, "A"])
@pytest.fixture
def data_for_sorting():
return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
@pytest.fixture
def data_missing_for_sorting():
return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def data_for_grouping():
return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
class TestDtype(base.BaseDtypeTests):
pass
class TestInterface(base.BaseInterfaceTests):
@pytest.mark.skip(reason="Memory usage doesn't match")
def test_memory_usage(self, data):
# Is this deliberate?
super().test_memory_usage(data)
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
def test_ravel(self, data):
# GH#27199 Categorical.ravel returns self until after deprecation cycle
with tm.assert_produces_warning(FutureWarning):
data.ravel()
class TestGetitem(base.BaseGetitemTests):
skip_take = pytest.mark.skip(reason="GH-20664.")
@pytest.mark.skip(reason="Backwards compatibility")
def test_getitem_scalar(self, data):
# CategoricalDtype.type isn't "correct" since it should
# be a parent of the elements (object). But don't want
# to break things by changing.
super().test_getitem_scalar(data)
@skip_take
def test_take(self, data, na_value, na_cmp):
# TODO remove this once Categorical.take is fixed
super().test_take(data, na_value, na_cmp)
@skip_take
def test_take_negative(self, data):
super().test_take_negative(data)
@skip_take
def test_take_pandas_style_negative_raises(self, data, na_value):
super().test_take_pandas_style_negative_raises(data, na_value)
@skip_take
def test_take_non_na_fill_value(self, data_missing):
super().test_take_non_na_fill_value(data_missing)
@skip_take
def test_take_out_of_bounds_raises(self, data, allow_fill):
return super().test_take_out_of_bounds_raises(data, allow_fill)
@pytest.mark.skip(reason="GH-20747. Unobserved categories.")
def test_take_series(self, data):
super().test_take_series(data)
@skip_take
def test_reindex_non_na_fill_value(self, data_missing):
super().test_reindex_non_na_fill_value(data_missing)
@pytest.mark.skip(reason="Categorical.take buggy")
def test_take_empty(self, data, na_value, na_cmp):
super().test_take_empty(data, na_value, na_cmp)
@pytest.mark.skip(reason="test not written correctly for categorical")
def test_reindex(self, data, na_value):
super().test_reindex(data, na_value)
class TestSetitem(base.BaseSetitemTests):
pass
class TestMissing(base.BaseMissingTests):
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_pad(self, data_missing):
super().test_fillna_limit_pad(data_missing)
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_backfill(self, data_missing):
super().test_fillna_limit_backfill(data_missing)
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="Unobserved categories included")
def test_value_counts(self, all_data, dropna):
return super().test_value_counts(all_data, dropna)
def test_combine_add(self, data_repeated):
# GH 20825
# When adding categoricals in combine, result is a string
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 + x2)
expected = pd.Series(
([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))])
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 + x2)
expected = pd.Series([a + val for a in list(orig_data1)])
self.assert_series_equal(result, expected)
@pytest.mark.skip(reason="Not Applicable")
def test_fillna_length_mismatch(self, data_missing):
super().test_fillna_length_mismatch(data_missing)
def test_searchsorted(self, data_for_sorting):
if not data_for_sorting.ordered:
raise pytest.skip(reason="searchsorted requires ordered data.")
class TestCasting(base.BaseCastingTests):
pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
op_name = all_arithmetic_operators
if op_name != "__rmod__":
super().test_arith_series_with_scalar(data, op_name)
else:
pytest.skip("rmod never called when string is first argument")
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="cannot perform"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == "__eq__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x == y)
assert (result == expected).all()
elif op_name == "__ne__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x != y)
assert (result == expected).all()
else:
with pytest.raises(TypeError):
op(data, other)
class TestParsing(base.BaseParsingTests):
pass
| apache-2.0 |
LohithBlaze/scikit-learn | sklearn/externals/joblib/parallel.py | 86 | 35087 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
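# Illustrative usage (not part of the public API; math.sqrt is just an example callable):
#   batch = BatchedCalls([(math.sqrt, (4,), {}), (math.sqrt, (9,), {})])
#   batch()   # -> [2.0, 3.0]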
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
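# Typical use, mirroring the examples in the Parallel docstring below:
#   Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))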
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
    It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
              output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
            If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
            Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
            default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
        max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
              - using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
        called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork inside non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
                # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only about 'verbose' times in total
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the dispatch loop above.
                # No need to wait for async callbacks to trigger
                # consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
shahankhatch/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
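        # min-max scale the decision scores to [0, 1] so they can be binned like probabilities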
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
hilaglanz/TCE | DensityPlotting.py | 1 | 101746 | import os
import time
import os.path
import sys
import threading
import multiprocessing
import shutil
import math
import pickle
import gc
import h5py
import argparse
import matplotlib
import numpy
matplotlib.use('Agg')
from ctypes import *
from amuse.units import units, constants, nbody_system, quantities
from amuse.units import *
#from amuse.lab import *
from amuse.units.quantities import AdaptingVectorQuantity, VectorQuantity
from amuse.datamodel import Particles, Particle
from amuse.ext import sph_to_star
from amuse.io import write_set_to_file, read_set_from_file
from amuse.plot import scatter, xlabel, ylabel, plot, pynbody_column_density_plot, HAS_PYNBODY, _smart_length_units_for_pynbody_data, convert_particles_to_pynbody_data, UnitlessArgs, semilogx, semilogy, loglog, xlabel, ylabel
from matplotlib import pyplot
import pynbody
import pynbody.plot.sph as pynbody_sph
from pynbody.analysis import angmom
from amuse.plot import scatter, xlabel, ylabel, plot, native_plot, sph_particles_plot, circle_with_radius, axvline
from BinaryCalculations import *
class Star:
def __init__(self, particle1,particle2):
if particle1 != None and particle2 != None:
self.Star(particle1,particle2)
else:
self.position = (0.0, 0.0, 0.0) | units.m
self.vx, self.vy, self.vz = (0.0 , 0.0, 0.0 ) | units.m / units.s
self.v = 0.0 | units.m / units.s
self.mass = 0.0 | units.kg
def Star(self,particle1,particle2):
particles = Particles()
part1=particle1.copy()
particles.add_particle(part1)
part2 = Particle()
part2.mass = particle2.mass
part2.position = particle2.position
part2.velocity = particle2.velocity
part2.radius = particle2.radius
particles.add_particle(part2)
self.velocity = particles.center_of_mass_velocity()
self.vx = self.velocity[0]
self.vy = self.velocity[1]
self.vz = self.velocity[2]
self.position = particles.center_of_mass()
self.x = self.position[0]
self.y = self.position[1]
self.z = self.position[2]
self.mass = particles.total_mass()
self.velocityDifference = CalculateVelocityDifference(particle1,particle2)
self.separation = CalculateSeparation(particle1,particle2)
self.specificEnergy = CalculateSpecificEnergy(self.velocityDifference,self.separation,particle1,particle2)
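        # presumably the two-body specific orbital energy, eps = |v_rel|**2 / 2 - G*(m1 + m2)/|r_rel|
        # (negative for a bound pair); the Calculate* helpers come from BinaryCalculations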
print("inner specific energy: ", self.specificEnergy)
self.potentialEnergy = particles.potential_energy()
self.kineticEnergy = particles.kinetic_energy()
particles.move_to_center()
self.angularMomentum = particles.total_angular_momentum()
self.omega = CalculateOmega(particles)
class SphGiant:
def __init__(self, gas_particles_file, dm_particles_file, opposite= False):
self.gasParticles = read_set_from_file(gas_particles_file, format='amuse')
if dm_particles_file is not None:
dms = read_set_from_file(dm_particles_file, format='amuse')
if opposite: #core is the first particle
self.core = dms[0]
else:
self.core = dms[-1]
else:
self.core = Particle()
self.core.mass = 0 | units.MSun
self.core.position = (0.0, 0.0, 0.0) | units.AU
self.core.vx = 0.0| units.m/units.s
self.core.vy = 0.0 | units.m/units.s
self.core.vz = 0.0 | units.m/units.s
self.core.radius = 0.0 | units.RSun
self.gas = Star(None, None)
self.gas.mass = self.gasParticles.total_mass()
self.gas.position = self.gasParticles.center_of_mass()
self.gas.x , self.gas.y, self.gas.z = self.gasParticles.center_of_mass()
self.gas.velocity = self.gasParticles.center_of_mass_velocity()
#self.gas.vx, self.gas.vy, self.gas.vz = self.gasParticles.center_of_mass_velocity()
self.gas.v = self.gas.velocity
self.mass = self.gas.mass + self.core.mass
self.position = (self.gas.position * self.gas.mass + self.core.position* self.core.mass)/self.mass
self.velocity = (self.gas.velocity * self.gas.mass + self.core.velocity* self.core.mass)/self.mass
self.x , self.y, self.z = self.position
self.vx, self.vy, self.vz = self.velocity
self.v = self.velocity
self.radius = self.gasParticles.total_radius()
self.dynamicalTime = 1.0/(constants.G*self.mass/((4*constants.pi*self.radius**3)/3))**0.5
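        # dynamical timescale t_dyn ~ 1/sqrt(G*<rho>), with <rho> the mean density inside total_radius()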
self.kineticEnergy = 0.0 |units.kg*(units.km**2) / units.s**2
self.thermalEnergy = 0.0 |units.kg*(units.km**2) / units.s**2
self.potentialEnergy = 0.0 |units.kg*(units.km**2) / units.s**2
self.gasPotential= 0.0 | units.kg * (units.km ** 2) / units.s ** 2
self.gasKinetic = 0.0 | units.kg * (units.km ** 2) / units.s ** 2
self.omegaPotential = 0.0 | units.kg * (units.km ** 2) / units.s ** 2
#self.angularMomentum = totalGiant.total_angular_momentum()
def CalculateEnergies(self,comV=None):
self.gasKinetic = self.gasParticles.kinetic_energy()
self.coreKinetic = 0.5 * self.core.mass * (CalculateVectorSize(self.core.velocity))**2
self.kineticEnergy = self.gasKinetic + self.coreKinetic
self.thermalEnergy = self.gasParticles.thermal_energy()
print("giant kinetic: ", self.kineticEnergy)
print("giant thermal: ", self.thermalEnergy)
self.gasPotential = self.GasPotentialEnergy()
print("gas potential: ", self.gasPotential)
self.potentialEnergy = self.gasPotential + self.potentialEnergyWithParticle(self.core)
print("giant potential: ", self.potentialEnergy)
#print "potential energies: ", self.gasPotential, self.gasParticles.mass[-1]*self.gas.mass*constants.G/self.radius
#self.potentialEnergy = self.gasPotential + self.potentialEnergyWithParticle(self.core)
def GasPotentialEnergy(self):
return self.gasParticles.potential_energy()
self.gasPotential = 0.0 |units.kg*(units.m**2) / units.s**2
mass = self.gasParticles.mass
x_vector = self.gasParticles.x
y_vector = self.gasParticles.y
z_vector = self.gasParticles.z
epsilon = self.gasParticles.epsilon
for i in range(len(self.gasParticles) - 1):
x = x_vector[i]
y = y_vector[i]
z = z_vector[i]
dx = x - x_vector[i + 1:]
dy = y - y_vector[i + 1:]
dz = z - z_vector[i + 1:]
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared + epsilon[i+1:]**2).sqrt()
m_m = mass[i] * mass[i + 1:]
energy_of_this_particle = constants.G * ((m_m / dr).sum())
self.gasPotential -= energy_of_this_particle
return self.gasPotential
def potentialEnergyWithParticle(self,particle, epsilon = None):
energy = 0.0 | units.kg*(units.m**2) / units.s**2
for part in self.gasParticles:
if epsilon is not None:
energy += -1.0*constants.G*part.mass*particle.mass/(CalculateVectorSize(CalculateSeparation(particle,part))**2+epsilon**2)**0.5
else:
energy += -1.0*constants.G*part.mass*particle.mass/(CalculateVectorSize(CalculateSeparation(particle,part))**2+part.epsilon**2)**0.5
return energy
def gravityWithParticle(self,particle):
force = VectorQuantity([0.0,0.0,0.0],units.kg*units.km*units.s**-2)
for part in self.gasParticles:
f = -1.0 * constants.G * part.mass * particle.mass / (
(CalculateVectorSize(CalculateSeparation(particle, part)) ** 2) ** 0.5) ** 3
force[0] += f * (part.x-particle.x)
force[1] += f * (part.y - particle.y)
force[2] += f * (part.z - particle.z)
f = -1.0 * constants.G * part.mass * particle.mass / (
(CalculateVectorSize(CalculateSeparation(particle, self.core)) ** 2) ** 0.5) ** 3
force[0] += f * (self.core.x-particle.x)
force[1] += f * (self.core.y - particle.y)
force[2] += f * (self.core.z - particle.z)
return force
def GetAngularMomentum(self,comPos=None,comV=None):
totalGiant = Particles()
totalGiant.add_particles(self.gasParticles)
totalGiant.add_particle(self.core)
totGiant = totalGiant.copy()
if comPos is not None:
totGiant.position -= comPos
if comV is not None:
totGiant.velocity -= comV
self.omegaPotential = CalculateOmega(totGiant)
return totGiant.total_angular_momentum()
def GetAngularMomentumOfGas(self, comPos=None, comV=None):
gas = self.gasParticles.copy()
if comPos is not None:
gas.position -= comPos
if comV is not None:
gas.velocity -= comV
return gas.total_angular_momentum()
def CalculateInnerSPH(self, relativeParticle, localRadius=50.0 | units.RSun, com_position=[0.0,0.0,0.0] | units.m,
com_velocity = [0.0,0.0,0.0] | units.m / units.s):
self.innerGas = Star(None, None)
radius = CalculateVectorSize(CalculateSeparation(relativeParticle, self.core))
print(time.ctime(), "beginning inner gas calculation")
self.CalculateSphMassVelocityAndPositionInsideRadius(radius, includeCore=True, centeralParticle=relativeParticle,
localRadius=localRadius, com_position_for_angular_momenta=com_position,
com_velocity_for_angular_momenta=com_velocity)
self.innerGas.x , self.innerGas.y, self.innerGas.z = self.innerGas.position
self.innerGas.kineticEnergy = 0.5*self.innerGas.mass*CalculateVectorSize(self.innerGas.v)**2
print(time.ctime(), "calculated!")
def CalculateTotalGasMassInsideRadius(self, radius):
innerMass = self.core.mass
for particle in self.gasParticles:
separation = CalculateVectorSize(CalculateSeparation(particle, self.core))
if separation < radius:
innerMass += particle.mass
return innerMass
def CalculateSphMassVelocityAndPositionInsideRadius(self,radius,includeCore=True,centeralParticle=None,
localRadius=0.0 | units.RSun,
com_position_for_angular_momenta=[0.0,0.0,0.0] | units.m,
com_velocity_for_angular_momenta = [0.0,0.0,0.0] | units.m / units.s):
self.innerGas.vxTot , self.innerGas.vyTot , self.innerGas.vzTot = ( 0.0 , 0.0, 0.0 )| units.m * units.s**-1
self.innerGas.xTot , self.innerGas.yTot , self.innerGas.zTot = ( 0.0 , 0.0, 0.0 )| units.m
self.innerGas.lxTot, self.innerGas.lyTot, self.innerGas.lzTot = (0.0, 0.0, 0.0) | (units.g * units.m**2 * units.s ** -1)
self.localMass = 0.0 | units.MSun
if includeCore:
self.innerGas.mass = self.core.mass
cmass = self.core.mass.value_in(units.MSun)
vx = self.core.vx
vy= self.core.vy
vz = self.core.vz
x = self.core.x
y = self.core.y
z = self.core.z
else:
            cmass = 0.0  # plain float, matching core.mass.value_in(units.MSun) in the branch above
vx = 0.0 | units.m / units.s
vy= 0.0 | units.m / units.s
vz = 0.0 | units.m / units.s
x = 0.0 | units.AU
y = 0.0 | units.AU
z = 0.0 | units.AU
velocityAndMass = [vx * cmass, vy* cmass, vz * cmass]
positionAndMass = [x * cmass, y * cmass, z * cmass]
angularmomentum = CalculateSpecificMomentum((x,y,z),(vx,vy,vz))
        # weight by the core mass (zero when the core is excluded) so the result has angular momentum units
        angularmomentum = [l*(cmass |units.MSun) for l in angularmomentum]
particlesAroundCore = 0
particlesAroundCenteral = 0
i = 0
for particle in self.gasParticles:
#print i
i += 1
separationFromCore = CalculateVectorSize(CalculateSeparation(particle, self.core))
if separationFromCore < radius:
pmass = particle.mass.value_in(units.MSun)
self.innerGas.mass += particle.mass
velocityAndMass[0] += particle.vx * pmass
velocityAndMass[1] += particle.vy * pmass
velocityAndMass[2] += particle.vz * pmass
positionAndMass[0] += particle.x * pmass
positionAndMass[1] += particle.y * pmass
positionAndMass[2] += particle.z * pmass
angularmomentumOfParticle = CalculateSpecificMomentum(particle.position-com_position_for_angular_momenta,
particle.velocity-com_velocity_for_angular_momenta)
angularmomentum[0] += angularmomentumOfParticle[0] * particle.mass
angularmomentum[1] += angularmomentumOfParticle[1] * particle.mass
angularmomentum[2] += angularmomentumOfParticle[2] * particle.mass
particlesAroundCore += 1
if centeralParticle != None:
separationFromCentral = CalculateVectorSize(CalculateSeparation(particle, centeralParticle))
if separationFromCentral < localRadius:
self.localMass += particle.mass
particlesAroundCenteral += 1
print(time.ctime(), particlesAroundCore, particlesAroundCenteral)
if particlesAroundCore > 0:
totalMass= self.innerGas.mass.value_in(units.MSun)
self.innerGas.vxTot = velocityAndMass[0] / totalMass
self.innerGas.vyTot = velocityAndMass[1] / totalMass
self.innerGas.vzTot = velocityAndMass[2] / totalMass
self.innerGas.xTot = positionAndMass[0] / totalMass
self.innerGas.yTot = positionAndMass[1] / totalMass
self.innerGas.zTot = positionAndMass[2] / totalMass
self.innerGas.lxTot = angularmomentum[0]
self.innerGas.lyTot = angularmomentum[1]
self.innerGas.lzTot = angularmomentum[2]
self.innerGas.v = (self.innerGas.vxTot, self.innerGas.vyTot, self.innerGas.vzTot)
self.innerGas.position = (self.innerGas.xTot, self.innerGas.yTot, self.innerGas.zTot)
self.innerGas.angularMomentum = (self.innerGas.lxTot, self.innerGas.lyTot, self.innerGas.lzTot)
if particlesAroundCenteral > 0:
self.localDensity = self.localMass / ((4.0*constants.pi*localRadius**3)/3.0)
else:
self.localDensity = 0.0 | units.g / units.m**3
def CountLeavingParticlesInsideRadius(self, com_position= [0.0,0.0,0.0] | units.m,
com_velocity=[0.0,0.0,0.0] | units.m / units.s, companion = None, method="estimated"):
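        # a gas particle counts as "leaving" when its specific energy is positive (unbound);
        # method="estimated" approximates the gas self-gravity by a point mass at the supplied
        # centre of mass, while any other value uses the exact pairwise gas potential (slower)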
self.leavingParticles = 0
self.totalUnboundedMass = 0 | units.MSun
dynamicalVelocity= self.radius/self.dynamicalTime
particlesExceedingMaxVelocity = 0
velocityLimitMax = 0.0 | units.cm/units.s
gas = self.gasParticles.copy()
gas.position -= com_position
gas.velocity -= com_velocity
specificKinetics = gas.specific_kinetic_energy()
com_particle = Particle(mass=self.mass)
com_particle.position = com_position
com_particle.velocity = com_velocity
extra_potential = [0.0 | units.erg / units.g for particle in self.gasParticles]
if companion is not None:
com_particle.mass += companion.mass
extra_potential = [CalculatePotentialEnergy(particle, companion) / particle.mass for particle in
self.gasParticles]
print("using method ", method)
if method == "estimated":
specificEnergy = [CalculateSpecificEnergy(particle.velocity - com_velocity, particle.position - com_position,
particle, com_particle) for particle in self.gasParticles]
else:
self.CalculateGasSpecificPotentials()
specificEnergy = [self.gasSpesificPotentials[i] + CalculatePotentialEnergy(self.gasParticles[i],
self.core) / self.gasParticles[i].mass \
+ extra_potential[i] + specificKinetics[i] for i in range(len(self.gasParticles))]
for i, particle in enumerate(self.gasParticles):
volume = (4.0 / 3.0) * constants.pi * particle.radius ** 3
particleSoundSpeed = ((5.0 / 3.0) * particle.pressure / (particle.mass / volume)) ** 0.5
velocityLimit = min(dynamicalVelocity, particleSoundSpeed)
velocityLimitMax = max(velocityLimitMax,velocityLimit)
if CalculateVectorSize(particle.velocity) > velocityLimit:
particlesExceedingMaxVelocity += 1
if specificEnergy[i] > 0 | specificEnergy[i].unit:
self.leavingParticles += 1
self.totalUnboundedMass += particle.mass
print("over speed ", particlesExceedingMaxVelocity*100.0 / len(self.gasParticles), "limit: ", velocityLimitMax)
return self.leavingParticles
def CalculateGasSpecificPotentials(self):
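        # O(N^2) pairwise specific potential of every gas particle due to all the others,
        # evaluated block-by-block so that only ~100 million pair distances are held in
        # memory at once (see 'max' below)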
n = len(self.gasParticles)
max = 100000 * 100 # 100m floats
block_size = max // n
if block_size == 0:
block_size = 1 # if more than 100m particles, then do 1 by one
mass = self.gasParticles.mass
x_vector = self.gasParticles.x
y_vector = self.gasParticles.y
z_vector = self.gasParticles.z
potentials = VectorQuantity.zeros(len(mass), mass.unit / x_vector.unit)
inf_len = numpy.inf | x_vector.unit
offset = 0
newshape = (n, 1)
x_vector_r = x_vector.reshape(newshape)
y_vector_r = y_vector.reshape(newshape)
z_vector_r = z_vector.reshape(newshape)
mass_r = mass.reshape(newshape)
while offset < n:
if offset + block_size > n:
block_size = n - offset
x = x_vector[offset:offset + block_size]
y = y_vector[offset:offset + block_size]
z = z_vector[offset:offset + block_size]
indices = numpy.arange(block_size)
dx = x_vector_r - x
dy = y_vector_r - y
dz = z_vector_r - z
dr_squared = (dx * dx) + (dy * dy) + (dz * dz)
dr = (dr_squared).sqrt()
index = (indices + offset, indices)
dr[index] = inf_len
potentials += (mass[offset:offset + block_size] / dr).sum(axis=1)
offset += block_size
self.gasSpesificPotentials = -constants.G * potentials
def FindSmallestCell(self):
smallestRadius = self.gasParticles.total_radius()
for gasParticle in self.gasParticles:
if gasParticle.radius < smallestRadius:
smallestRadius = gasParticle.radius
return smallestRadius
def FindLowestNumberOfNeighbours(self):
numberOfNeighbours = len(self.gasParticles)
for gasParticle in self.gasParticles:
if gasParticle.num_neighbours < numberOfNeighbours:
numberOfNeighbours = gasParticle.num_neighbours
return numberOfNeighbours
def CalculateQuadropoleMoment(self):
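        # components of the second time derivative of the traceless mass quadrupole tensor,
        # d^2/dt^2 [ m*(x_i*x_j - delta_ij*r^2/3) ], summed over the core and all gas particles;
        # assumes every particle carries an acceleration (ax, ay, az)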
Qxx = (self.core.mass * (self.core.ax*self.core.x + 2 * self.core.vx * self.core.vx +
self.core.x * self.core.ax - (2.0/3.0) * (self.core.ax * self.core.x +
self.core.ay * self.core.y +
self.core.az * self.core.z +
CalculateVectorSize(self.core.velocity)**2)))
Qxy = (self.core.mass * (self.core.ax * self.core.y + 2 * self.core.vx * self.core.vy +
self.core.x * self.core.ay))
Qxz = (self.core.mass * (self.core.ax * self.core.z + 2 * self.core.vx * self.core.vz +
self.core.x * self.core.az))
Qyx = (self.core.mass * (self.core.ay * self.core.x + 2 * self.core.vy * self.core.vx +
self.core.y * self.core.ax))
Qyy = (self.core.mass * (self.core.ay*self.core.y + 2 * self.core.vy * self.core.vy +
self.core.y * self.core.ay - (2.0/3.0) * (self.core.ax * self.core.x +
self.core.ay * self.core.y +
self.core.az * self.core.z +
CalculateVectorSize(self.core.velocity)**2)))
Qyz = (self.core.mass * (self.core.ay * self.core.z + 2 * self.core.vy * self.core.vz +
self.core.y * self.core.az))
Qzx = (self.core.mass * (self.core.az * self.core.x + 2 * self.core.vz * self.core.vx +
self.core.z * self.core.ax))
Qzy = (self.core.mass * (self.core.az * self.core.y + 2 * self.core.vz * self.core.vy +
self.core.z * self.core.ay))
Qzz = (self.core.mass * (self.core.az * self.core.z + 2 * self.core.vz * self.core.vz +
self.core.z * self.core.az - (2.0/3.0) * (self.core.ax * self.core.x +
self.core.ay * self.core.y +
self.core.az * self.core.z +
CalculateVectorSize(self.core.velocity)**2)))
for gasParticle in self.gasParticles:
Qxx += (gasParticle.mass * (gasParticle.ax*gasParticle.x + 2 * gasParticle.vx * gasParticle.vx +
gasParticle.x * gasParticle.ax - (2.0/3.0) * (gasParticle.ax * gasParticle.x +
gasParticle.ay * gasParticle.y +
gasParticle.az * gasParticle.z +
CalculateVectorSize(gasParticle.velocity)**2)))
Qxy += (gasParticle.mass * (gasParticle.ax * gasParticle.y + 2 * gasParticle.vx * gasParticle.vy +
gasParticle.x * gasParticle.ay))
Qxz += (gasParticle.mass * (gasParticle.ax * gasParticle.z + 2 * gasParticle.vx * gasParticle.vz +
gasParticle.x * gasParticle.az))
Qyx += (gasParticle.mass * (gasParticle.ay * gasParticle.x + 2 * gasParticle.vy * gasParticle.vx +
gasParticle.y * gasParticle.ax))
Qyy += (gasParticle.mass * (gasParticle.ay*gasParticle.y + 2 * gasParticle.vy * gasParticle.vy +
gasParticle.y * gasParticle.ay - (2.0/3.0) * (gasParticle.ax * gasParticle.x +
gasParticle.ay * gasParticle.y +
gasParticle.az * gasParticle.z +
CalculateVectorSize(gasParticle.velocity)**2)))
Qyz += (gasParticle.mass * (gasParticle.ay * gasParticle.z + 2 * gasParticle.vy * gasParticle.vz +
gasParticle.y * gasParticle.az))
Qzx += (gasParticle.mass * (gasParticle.az * gasParticle.x + 2 * gasParticle.vz * gasParticle.vx +
gasParticle.z * gasParticle.ax))
Qzy += (gasParticle.mass * (gasParticle.az * gasParticle.y + 2 * gasParticle.vz * gasParticle.vy +
gasParticle.z * gasParticle.ay))
Qzz += (gasParticle.mass * (gasParticle.az * gasParticle.z + 2 * gasParticle.vz * gasParticle.vz +
gasParticle.z * gasParticle.az - (2.0/3.0) * (gasParticle.ax * gasParticle.x +
gasParticle.ay * gasParticle.y +
gasParticle.az * gasParticle.z +
CalculateVectorSize(gasParticle.velocity)**2)))
return Qxx.value_in(units.m**2 * units.kg * units.s**-2),Qxy.value_in(units.m**2 * units.kg * units.s**-2),\
Qxz.value_in(units.m**2 * units.kg * units.s**-2),Qyx.value_in(units.m**2 * units.kg * units.s**-2),\
Qyy.value_in(units.m**2 * units.kg * units.s**-2),Qyz.value_in(units.m**2 * units.kg * units.s**-2),\
Qzx.value_in(units.m**2 * units.kg * units.s**-2),Qzy.value_in(units.m**2 * units.kg * units.s**-2),\
Qzz.value_in(units.m**2 * units.kg * units.s**-2)
class MultiProcessArrayWithUnits:
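    """Shared-memory float array (multiprocessing.Array) tagged with an AMUSE unit.

    Worker processes write plain floats into self.array; plot() re-attaches the unit
    (when one was given) and hands the series to Plot1Axe for saving/plotting.

    Illustrative use (assumed, not executed here):
        masses = MultiProcessArrayWithUnits(nSteps, units.MSun)
        # ... worker processes fill masses.array[i] ...
        masses.plot("mass", outputDir, timeStep, beginStep, toPlot=False)
    """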
def __init__(self,size,units):
self.array = multiprocessing.Array('f', [-1.0 for i in range(size)])
self.units = units
def plot(self, filename, outputDir,timeStep, beginStep, toPlot):
if self.units is None:
array = [a for a in self.array]
else:
array = AdaptingVectorQuantity([a for a in self.array], self.units)
Plot1Axe(array,filename, outputDir,timeStep, beginStep, toPlot)
def LoadBinaries(file, opposite= False):
load = read_set_from_file(file, format='amuse')
#print load
if not opposite:
stars = Particles(2, particles= [load[0], load[1]])
else: #take the next
stars = Particles(2, particles= [load[1], load[2]])
return stars
def CalculateQuadropoleMomentOfParticle(particle):
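    # Same second time derivative of the traceless quadrupole moment as
    # CalculateQuadropoleMoment above, but for a single point-mass particle; the nine
    # components are returned as plain floats in SI units (m^2 kg s^-2).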
Qxx = (particle.mass * (particle.ax*particle.x + 2 * particle.vx * particle.vx +
particle.x * particle.ax - (2.0/3.0) * (particle.ax * particle.x +
particle.ay * particle.y +
particle.az * particle.z +
CalculateVectorSize(particle.velocity)**2))).value_in(units.m**2 * units.kg * units.s**-2)
Qxy = (particle.mass * (particle.ax * particle.y + 2 * particle.vx * particle.vy +
particle.x * particle.ay)).value_in(units.m**2 * units.kg * units.s**-2)
Qxz = (particle.mass * (particle.ax * particle.z + 2 * particle.vx * particle.vz +
particle.x * particle.az)).value_in(units.m**2 * units.kg * units.s**-2)
Qyx = (particle.mass * (particle.ay * particle.x + 2 * particle.vy * particle.vx +
particle.y * particle.ax)).value_in(units.m**2 * units.kg * units.s**-2)
Qyy = (particle.mass * (particle.ay*particle.y + 2 * particle.vy * particle.vy +
particle.y * particle.ay - (2.0/3.0) * (particle.ax * particle.x +
particle.ay * particle.y +
particle.az * particle.z +
CalculateVectorSize(particle.velocity)**2))).value_in(units.m**2 * units.kg * units.s**-2)
Qyz = (particle.mass * (particle.ay * particle.z + 2 * particle.vy * particle.vz +
particle.y * particle.az)).value_in(units.m**2 * units.kg * units.s**-2)
Qzx = (particle.mass * (particle.az * particle.x + 2 * particle.vz * particle.vx +
particle.z * particle.ax)).value_in(units.m**2 * units.kg * units.s**-2)
Qzy = (particle.mass * (particle.az * particle.y + 2 * particle.vz * particle.vy +
particle.z * particle.ay)).value_in(units.m**2 * units.kg * units.s**-2)
Qzz = (particle.mass * (particle.az * particle.z + 2 * particle.vz * particle.vz +
particle.z * particle.az - (2.0/3.0) * (particle.ax * particle.x +
particle.ay * particle.y +
particle.az * particle.z +
CalculateVectorSize(particle.velocity)**2))).value_in(units.m**2 * units.kg * units.s**-2)
return float(Qxx),float(Qxy),float(Qxz),float(Qyx),float(Qyy),float(Qyz),float(Qzx),float(Qzy),float(Qzz)
def GetPropertyAtRadius(mesaStarPropertyProfile, mesaStarRadiusProfile, radius):
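    # Linear scan over the radius profile (assumed ascending): return the property value of
    # the first zone whose radius reaches the requested radius, clamped to the outermost zone.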
profileLength = len(mesaStarRadiusProfile)
i = 0
while i < profileLength and mesaStarRadiusProfile[i] < radius:
i += 1
return mesaStarPropertyProfile[min(i, profileLength - 1)]
def CalculateCumulantiveMass(densityProfile, radiusProfile):
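    # Cumulative mass by shell integration: the innermost zone is treated as a uniform sphere,
    # then m(r_i) = m(r_{i-1}) + 4*pi*r_i^2*rho_i*dr; returned as plain floats in MSun.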
profileLength = len(radiusProfile)
cmass = [densityProfile[0] * 4.0/3.0 * constants.pi * radiusProfile[0] ** 3 for i in xrange(profileLength)]
for i in xrange(1, profileLength):
dr = radiusProfile[i] - radiusProfile[i-1]
cmass[i] = (cmass[i-1] + densityProfile[i] * 4.0 * constants.pi*(radiusProfile[i] ** 2) * dr)
vectormass = [m.value_in(units.MSun) for m in cmass]
return vectormass
def CalculateTau(densityProfile, radiusProfile, coreRadius, coreDensity,temperatureProfile, edgeRadius):
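    # Optical depth integrated inward from edgeRadius with a constant opacity kappa:
    # tau(r_i) = tau(r_{i+1}) + kappa*rho_i*(r_{i+1} - r_i), starting from tau ~ 0 at the edge.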
profileLength = len(radiusProfile)
radiusIndex = 0
while radiusProfile[radiusIndex] < edgeRadius:
radiusIndex += 1
X= 0.73
Y = 0.25
Z= 0.02
#kappa = 0.2 * (1 + X) | (units.cm**2)*(units.g**-1)
kappa = 12.0 | units.cm**2 / units.g
#kappa = (3.8*10**22)*(1 + X)* (X + Y)* densityProfile * temperatureProfile**(-7.0/2)
#print kappa
tauPoint = [kappa * densityProfile[i] * (radiusProfile[i+1] - radiusProfile[i]) for i in xrange(0, radiusIndex)]
tauPoint.append((0.0 |(units.g*units.cm**-2))*kappa)
tau = tauPoint
tau[radiusIndex] = tauPoint[radiusIndex]
for i in xrange(radiusIndex - 1, 0 , -1 ):
tau[i] = tau[i + 1] + tauPoint[i]
#print tau[-1], tau[-100]
i = radiusIndex
while (tau[i] < 2.0/3.0 and i >= 0):
i -= 1
j = radiusIndex
while (tau[j] < 13.0 and j >= 0):
j -= 1
#print "edge: ", radiusProfile[radiusIndex].as_quantity_in(units.RSun), " at index= ",radiusIndex, " photosphere radius: ", \
# radiusProfile[i].as_quantity_in(units.RSun), " at index= ", i, "tau is 13 at radius= ", radiusProfile[j].as_quantity_in(units.RSun)
return tau
def mu(X = None, Y = 0.25, Z = 0.02, x_ion = 0.1):
"""
Compute the mean molecular weight in kg (the average weight of particles in a gas)
X, Y, and Z are the mass fractions of Hydrogen, of Helium, and of metals, respectively.
x_ion is the ionisation fraction (0 < x_ion < 1), 1 means fully ionised
"""
if X is None:
X = 1.0 - Y - Z
elif abs(X + Y + Z - 1.0) > 1e-6:
print("Error in calculating mu: mass fractions do not sum to 1.0")
return constants.proton_mass / (X*(1.0+x_ion) + Y*(1.0+2.0*x_ion)/4.0 + Z*x_ion/2.0)
def structure_from_star(star):
radius_profile = star.radius
density_profile = star.rho
if hasattr(star, "get_mass_profile"):
mass_profile = star.dmass * star.mass
else:
radii_cubed = radius_profile**3
radii_cubed.prepend(0|units.m**3)
mass_profile = (4.0/3.0 * constants.pi) * density_profile * (radii_cubed[1:] - radii_cubed[:-1])
cumulative_mass_profile = CalculateCumulantiveMass(density_profile, radius_profile)
tau = CalculateTau(density_profile, radius_profile, 0.0159 | units.RSun, (0.392|units.MSun)/((4.0/3.0)*constants.pi*(0.0159 |units.RSun)**3), star.temperature, radius_profile[-100])
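    # sound_speed is initialised below as a vector of ones (T/T) only to obtain an AMUSE
    # VectorQuantity of the right length; each zone is then overwritten with the adiabatic
    # sound speed c_s = sqrt((5/3) * kB * T / mu).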
sound_speed = star.temperature / star.temperature | units.cm / units.s
for i in xrange(len(star.temperature)):
sound_speed[i] = math.sqrt(((5.0/3.0) * constants.kB * star.temperature[i] / mu()).value_in(units.m **2 * units.s**-2)) | units.m / units.s
return dict(
radius = radius_profile.as_quantity_in(units.RSun),
density = density_profile,
mass = mass_profile,
temperature = star.temperature,
pressure = star.pressure,
sound_speed = sound_speed,
cumulative_mass = cumulative_mass_profile,
tau = tau
)
def velocity_softening_distribution(sphGiant,step,outputDir):
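    # Gas particles are ordered by decreasing pressure; with one particle per "bin" the
    # reshape/sum only preserves that ordering, and the per-particle speeds and radii
    # (softenings) are dumped to text files for later radial-profile analysis.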
sorted = sphGiant.gasParticles.pressure.argsort()[::-1]
binned = sorted.reshape((-1, 1))
velocities = sphGiant.gasParticles.velocity[binned].sum(axis=1)
textFile = open(outputDir + '/radial_profile/velocities_{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(CalculateVectorSize(v)) for v in velocities]))
textFile.close()
h = sphGiant.gasParticles.radius[binned].sum(axis=1)
textFile = open(outputDir + '/radial_profile/softenings_{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(r) for r in h]))
textFile.close()
def temperature_density_plot(sphGiant, step, outputDir, toPlot = False, plotDust= False, dustRadius= 0.0 | units.RSun):
if not HAS_PYNBODY:
print("problem plotting")
return
width = 5.0 | units.AU
length_unit, pynbody_unit = _smart_length_units_for_pynbody_data(width)
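    # Temperature of an ideal monatomic gas from the specific internal energy u: T = (2/3)*u*mu/kB.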
sphGiant.gasParticles.temperature = 2.0/3.0 * sphGiant.gasParticles.u * mu() / constants.kB
sphGiant.gasParticles.mu = mu()
    if sphGiant.core.mass > (0.0 | units.MSun):
        star = sph_to_star.convert_SPH_to_stellar_model(sphGiant.gasParticles, core_particle=sphGiant.core, particles_per_zone=1)  # TODO: wrap this in code that adds the core density from MESA.
else:
star = sph_to_star.convert_SPH_to_stellar_model(sphGiant.gasParticles)
data = structure_from_star(star)
#sphGiant.gasParticles.radius = CalculateVectorSize((sphGiant.gasParticles.x,sphGiant.gasParticles.y,sphGiant.gasParticles.z))
#data = convert_particles_to_pynbody_data(sphGiant.gasParticles, length_unit, pynbody_unit)
#plot to file
print("writing data to files")
textFile = open(outputDir + '/radial_profile/temperature_{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(y) for y in data["temperature"]]))
textFile.close()
textFile = open(outputDir + '/radial_profile/density_{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(y) for y in data["density"]]))
textFile.close()
textFile = open(outputDir + '/radial_profile/radius_{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(y) for y in data["radius"]]))
textFile.close()
textFile = open(outputDir + '/radial_profile/sound_speed_{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(y) for y in data["sound_speed"]]))
textFile.close()
textFile = open(outputDir + '/radial_profile/mass_profile{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(y) for y in data["mass"]]))
textFile.close()
textFile = open(outputDir + '/radial_profile/cumulative_mass_profile{0}'.format(step) + '.txt', 'w')
textFile.write(', '.join([str(y) for y in data["cumulative_mass"]]))
textFile.close()
velocity_softening_distribution(sphGiant,step,outputDir)
if toPlot:
figure = pyplot.figure(figsize = (8, 10))
pyplot.subplot(1, 1, 1)
ax = pyplot.gca()
plotT = semilogy(data["radius"], data["temperature"], 'r-', label = r'$T(r)$', linewidth=3.0)
xlabel('Radius', fontsize=24.0)
ylabel('Temperature', fontsize= 24.0)
ax.twinx()
#plotrho = semilogy(data["radius"][:-1000], data["density"][:-1000].as_quantity_in(units.g * units.cm **-3), 'g-', label = r'$\rho(r)$', linewidth=3.0)
plotrho = semilogy(data["radius"], data["density"].as_quantity_in(units.g * units.cm **-3), 'g-', label = r'$\rho(r)$', linewidth=3.0)
plots = plotT + plotrho
labels = [one_plot.get_label() for one_plot in plots]
ax.legend(plots, labels, loc=3)
ax.labelsize=20.0
ax.titlesize=24.0
ylabel('Density')
#print "saved"
pyplot.legend()
pyplot.xticks(fontsize=20.0)
pyplot.yticks(fontsize=20.0)
pyplot.suptitle('Structure of a {0} star'.format(sphGiant.mass))
pyplot.savefig(outputDir + "/radial_profile/temperature_radial_proile_{0}.jpg".format(step), format='jpeg')
#pyplot.close(figure)
pyplot.clf()
pyplot.cla()
'''
figure = pyplot.figure(figsize = (15, 11))
#pyplot.subplot(1, 1,1)
ax = pyplot.gca()
pyplot.axes()
plotC = semilogx(data["radius"][:-1000], (data["cumulative_mass"]/data["mass"][-1])[:-1000], 'r-', label = r'$Mint(r)/Mtot$',linewidth=3.0)
print (data["radius"])[-1000]
ax.twinx()
plotRc= axvline(340.0 | units.RSun, linestyle='dashed', label = r'$Rc$',linewidth=3.0)
legend = ax.legend(labels=[r'$Mint(r)/Mtot$', r'$Rc$'],ncol=3, loc=4, fontsize= 24.0)
ax.set_yticklabels([])
ax.set_ylabel('')
ax.set_xlabel('Radius [RSun]')
loc = legend._get_loc()
print loc
xlabel('Radius', fontsize=24.0)
ylabel('')
ax.set_ylabel('')
#ylabel('Cumulative Mass to Total Mass Ratio', fontsize=24.0)
#pyplot.xlim(10,10000)
pyplot.xlabel('Radius', fontsize=24.0)
pyplot.xticks(fontsize = 20.0)
#ax.set_xticklabels([10^1,10^2,10^3,10^4,10^5],fontsize=20)
pyplot.yticks(fontsize= 20.0)
pyplot.ylabel('')
ax.set_ylabel('Cumulative Mass to Total Mass Ratio')
ax.yaxis.set_label_coords(-0.1,0.5)
#pyplot.ylabel('Cumulative Mass to Total Mass Ratio')
#pyplot.axes.labelsize = 24.0
#pyplot.axes.titlesize = 24.0
pyplot.legend(bbox_to_anchor=(1.0,0.2),loc=0, fontsize=24.0)
matplotlib.rcParams.update({'font.size': 20})
pyplot.tick_params(axis='y', which='both', labelleft='on', labelright='off')
#pyplot.rc('text', usetex=True)
#pyplot.suptitle('Cumulative mass ratio of {0} MSun Red Giant Star as a function of the distance from its core'.format(int(sphGiant.mass.value_in(units.MSun) * 100) / 100.0), fontsize=24)
pyplot.savefig(outputDir + "/radial_profile/cumulative_mass_radial_proile_{0}".format(step))
'''
pyplot.close()
if plotDust:
print("calculating values")
mdot = (4.0 * constants.pi * (dustRadius)**2 * GetPropertyAtRadius(data["density"],data["radius"], dustRadius) * GetPropertyAtRadius(data["sound_speed"],data["radius"],dustRadius)).as_quantity_in(units.MSun / units.yr)
m = GetPropertyAtRadius(data["cumulative_mass"], data["radius"], dustRadius)
M = GetPropertyAtRadius(data["cumulative_mass"], data["radius"], 7000.0 | units.RSun)
print("Mdot at 340: ", mdot)
print("cs at 340: ", GetPropertyAtRadius(data["sound_speed"],data["radius"], dustRadius))
#print "tau at 3000: ", GetPropertyAtRadius(data["tau"],data["radius"], 3000.0 | units.RSun)
print("density at 340: ", GetPropertyAtRadius(data["density"],data["radius"], dustRadius))
print("m over 340: ", (M - m))
print("M total: ", M)
print("time: ", ((M-m)/mdot))
def PlotDensity(sphGiant,core,binary,i, outputDir, vmin, vmax, plotDust=False, dustRadius=700 | units.RSun, width = 4.0 | units.AU, side_on = False, timeStep = 0.2):
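    # Render a pynbody density map of the gas, face-on by default or as side-by-side
    # face-on/edge-on panels when side_on=True, overplotting the giant's core (red)
    # and the companion(s) (white) when they fall inside the plotted window.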
if not HAS_PYNBODY:
print("problem plotting")
return
#width = 0.08 * sphGiant.position.lengths_squared().amax().sqrt()
#width = 5.0 * sphGiant.position.lengths_squared().amax().sqrt()
#width = 4.0 | units.AU
length_unit, pynbody_unit = _smart_length_units_for_pynbody_data(width)
pyndata = convert_particles_to_pynbody_data(sphGiant, length_unit, pynbody_unit)
UnitlessArgs.strip([1]|length_unit, [1]|length_unit)
if not side_on:
with angmom.faceon(pyndata, cen=[0.0,0.0,0.0], vcen=[0.0,0.0,0.0]):
pynbody_sph.image(pyndata, resolution=2000,width=width.value_in(length_unit), units='g cm^-3',
vmin= vmin, vmax= vmax, cmap="hot", title = str(i * timeStep) + " days")
UnitlessArgs.current_plot = native_plot.gca()
'''native_plot.xlim(xmax=2, xmin=-10)
native_plot.ylim(ymax=6, ymin=-6)
native_plot.xticks([-10,-8,-6,-4,-2,0,2],[-6,-4,-2,0,2,4,6])'''
native_plot.ylabel('y[AU]')
yLabel = 'y[AU]'
#pyplot.xlim(-5,-2)
            if core.mass != (0 | units.MSun):
                if -width / 2.0 <= core.x <= width / 2.0 and -width / 2.0 <= core.y <= width / 2.0:
                    # both coordinates are inside the boundaries - otherwise don't plot the core
scatter(core.x, core.y, c="r")
scatter(binary.x, binary.y, c="w")
#pyplot.xlim(-930, -350)
#pyplot.ylim(-190,390)
if plotDust:
circle_with_radius(core.x, core.y,dustRadius, fill=False, color='white', linestyle= 'dashed', linewidth=3.0)
else:
outputDir += "/side_on"
pyplot.rc('font',family='Serif',size=30)
fig, (face, side) = pyplot.subplots(nrows=1,ncols=2, figsize=(40,14))
fig.subplots_adjust(wspace=0.01)
with angmom.faceon(pyndata, cen=[0.0, 0.0, 0.0], vcen=[0.0, 0.0, 0.0]):
pynbody_sph.image(pyndata, subplot=face, resolution=2000, width=width.value_in(length_unit),
units='g cm^-3',show_cbar=False, clear=False,
vmin=vmin, vmax=vmax, cmap="hot")
face.set_adjustable('box-forced')
            if core.mass != (0 | units.MSun):
                if -width / 2.0 <= core.x <= width / 2.0 and -width / 2.0 <= core.y <= width / 2.0:
                    # both coordinates are inside the boundaries - otherwise don't plot the core
face.scatter(core.x.value_in(units.AU), core.y.value_in(units.AU), c="r")
face.scatter(binary.x.value_in(units.AU), binary.y.value_in(units.AU), c="w")
face.set_ylabel('y[AU]')
face.set_xlabel('x[AU]')
with angmom.sideon(pyndata, cen=[0.0, 0.0, 0.0], vcen=[0.0, 0.0, 0.0]):
im2 = pynbody_sph.image(pyndata, subplot=side, resolution=2000, width=width.value_in(length_unit),
units='g cm^-3', show_cbar=False, ret_im=True, clear=False,
vmin=vmin, vmax=vmax, cmap="hot")
side.set_adjustable('box-forced')
            if core.mass != (0 | units.MSun):
                if -width / 2.0 <= core.x <= width / 2.0 and -width / 2.0 <= core.z <= width / 2.0:
                    # both coordinates are inside the boundaries - otherwise don't plot the core
side.scatter(core.x.value_in(units.AU), core.z.value_in(units.AU), c="r")
side.scatter(binary.x.value_in(units.AU), binary.z.value_in(units.AU), c="w")
side.set_ylabel('z[AU]')
side.set_xlabel('x[AU]')
fig.suptitle(str(i * timeStep) + " days")
cbar = pyplot.colorbar(im2, aspect=10,fraction=0.08,pad=0.01,panchor=(0,0),anchor=(0,0))
cbar.set_label('Density $[g/cm^3]$')
'''
with angmom.sideon(pyndata, cen=[0.0,0.0,0.0], vcen=[0.0,0.0,0.0]):
pynbody_sph.sideon_image(pyndata, resolution=2000,width=width.value_in(length_unit), units='g cm^-3',
vmin= vmin, vmax= vmax, cmap="hot", title = str(i * timeStep) + " days")
UnitlessArgs.current_plot = native_plot.gca()
native_plot.ylabel('z[AU]')
yLabel = 'z[AU]'
if core.mass != 0 | units.MSun:
if core.x >= -1* width / 2.0 and core.x <= width/ 2.0 and core.z >= -1 * width/ 2.0 and core.z <= width / 2.0:
#both coordinates are inside the boundaries- otherwise dont plot it
scatter(core.x, core.z, c="r")
scatter(binary.x, binary.z, c="w")
if plotDust:
circle_with_radius(core.x, core.z,dustRadius, fill=False, color='white', linestyle= 'dashed', linewidth=3.0)
'''
#native_plot.colorbar(fontsize=20.0)
matplotlib.rcParams.update({'font.size': 30, 'font.family': 'Serif'})
pyplot.rcParams.update({'font.size': 30, 'font.family': 'Serif'})
#pyplot.rc('text', usetex=True)
#cbar.ax.set_yticklabels(cbar
# .ax.get_yticklabels(), fontsize=24)
#pyplot.axes.labelsize(24)
pyplot.savefig(outputDir + "/plotting_{0}.jpg".format(i), transparent=False)
pyplot.close()
def PlotVelocity(sphGiant,core,binary,step, outputDir, vmin, vmax, timeStep = 0.2):
if not HAS_PYNBODY:
print(HAS_PYNBODY)
print("problem plotting")
return
width = 1.7 * sphGiant.position.lengths_squared().amax().sqrt()
length_unit, pynbody_unit = _smart_length_units_for_pynbody_data(width)
pyndata = convert_particles_to_pynbody_data(sphGiant, length_unit, pynbody_unit)
UnitlessArgs.strip([1]|length_unit, [1]|length_unit)
pynbody_sph.velocity_image(pyndata, width=width.value_in(length_unit), units='g cm^-3',vmin= vmin, vmax= vmax,
title = str(step * timeStep) + " days")
UnitlessArgs.current_plot = native_plot.gca()
#print core.mass
#if core.mass != 0 |units.MSun:
# scatter(core.x, core.y, c="r")
scatter(core.x, core.y, c="r")
scatter(binary.x, binary.y, c="w")
pyplot.savefig(outputDir + "/velocity/velocity_plotting_{0}.jpg".format(step), transparent=False)
pyplot.close()
def Plot1Axe(x, fileName, outputDir, timeStep= 1400.0/7000.0, beginStep = 0, toPlot=False):
if len(x) == 0:
return
beginTime = beginStep * timeStep
timeLine = [beginTime + time * timeStep for time in xrange(len(x))] | units.day
textFile = open(outputDir + '/' + fileName + 'time_' + str(beginTime) + "_to_" + str(beginTime + (len(x) - 1.0) * timeStep) + 'days.txt', 'w')
textFile.write(', '.join([str(y) for y in x]))
textFile.close()
if toPlot:
native_plot.figure(figsize= (20, 20), dpi= 80)
plot(timeLine,x)
xlabel('time[days]')
native_plot.savefig(outputDir + '/' + fileName + 'time_' + str(beginTime) + "_to_" + str(beginTime + (len(x) - 1.0) * timeStep) + 'days.jpg')
def PlotAdaptiveQuantities(arrayOfValueAndNamePairs, outputDir, beginStep = 0, timeStep= 1400.0/7000.0, toPlot = False):
for a in arrayOfValueAndNamePairs:
if a[0]:
Plot1Axe(a[0], a[1], outputDir, timeStep, beginStep, toPlot)
def PlotEccentricity(eccentricities, outputDir, beginStep = 0, timeStep= 1400.0/7000.0, toPlot = False):
for e in eccentricities:
if e[0] != []:
Plot1Axe(e[0], e[1], outputDir, timeStep, beginStep, toPlot)
def PlotBinaryDistance(distances, outputDir, beginStep = 0, timeStep= 1400.0/7000.0, toPlot = False):
for d in distances:
if d[0]:
Plot1Axe(d[0], d[1], outputDir, timeStep, beginStep, toPlot)
def PlotQuadropole(Qxx,Qxy,Qxz,Qyx, Qyy,Qyz,Qzx,Qzy,Qzz, outputDir = 0, timeStep = 1400.0/7000.0, beginStep = 0):
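    # The Q arrays are assumed to have been stored scaled down by 1e40 (as in
    # AnalyzeBinaryChunk); they are rescaled back to SI values when written out.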
if len(Qxx) == 0:
return
beginTime = beginStep * timeStep
timeLine = [beginTime + time * timeStep for time in xrange(len(Qxx))] | units.day
textFile = open(outputDir + '/quadropole_time_' + str(beginTime) + "_to_" + str(beginTime + (len(Qxx) - 1.0) * timeStep) + 'days.txt', 'w')
textFile.write("Qxx,Qxy,Qxz,Qyx,Qyy,Qyz,Qzx,Qzy,Qzz\r\n")
for i in xrange(0, len(Qxx)):
textFile.write(' ,'.join([str(Qxx[i] * 10**40), str(Qxy[i] * 10**40), str(Qxz[i] * 10**40),
str(Qyx[i] * 10**40), str(Qyy[i] * 10**40), str(Qyz[i] * 10**40),
str(Qzx[i] * 10**40), str(Qzy[i] * 10**40), str(Qzz[i] * 10**40)]))
textFile.write('\r\n')
textFile.close()
def AnalyzeBinaryChunk(savingDir,gasFiles,dmFiles,outputDir,chunk, vmin, vmax, beginStep, binaryDistances,binaryDistancesUnits,
semmimajors,semmimajorsUnits, eccentricities, innerMass, innerMassUnits,
pGas, pGasUnits, pGiant, pGiantUnits, pCompCore, pCompCoreUnits, pTot, pTotUnits,
kGas, kGasUnits, uGiant, uGiantUnits, kCore, kCoreUnits,
kComp, kCompUnits, eTot, eTotUnits,
innerAngularMomenta,
innerAngularMomentaUnits, companionAngularMomenta, companionAngularMomentaUnits,
giantAngularMomenta, giantAngularMomentaUnits,
gasAngularMomenta, gasAngularMomentaUnits,
angularCores, angularCoresUnits,
totAngularMomenta, totAngularMomentaUnits,
massLoss, massLossUnits,
Qxx,Qxy,Qxz,Qyx,Qyy,Qyz,Qzx,Qzy,Qzz,
toPlot = False, plotDust=False, dustRadius= 340.0 | units.RSun, massLossMethod="estimated",
timeStep=0.2):
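    # Each worker process handles one chunk of output steps and writes its results into the
    # shared arrays at the global index i = beginStep + index, as plain floats expressed in
    # the units passed alongside each array.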
for index,step in enumerate(chunk):
i = beginStep + index
print("step #",i)
for f in [obj for obj in gc.get_objects() if isinstance(obj, h5py.File)]:
try:
f.close()
except:
pass
gas_particles_file = os.path.join(os.getcwd(), savingDir,gasFiles[step])
dm_particles_file = None
if len(dmFiles) > 0:
dm_particles_file = os.path.join(os.getcwd(),savingDir, dmFiles[step])
sphGiant = SphGiant(gas_particles_file, dm_particles_file, opposite=True)
sphPointStar = Particle()
sphPointStar.position = sphGiant.position
sphPointStar.velocity = sphGiant.velocity
sphPointStar.mass = sphGiant.mass
sphPointStar.radius = sphGiant.radius
try:
binary = LoadBinaries(dm_particles_file)
companion = binary[1]
except: #no binary
binary = []
companion = sphPointStar
#print binary
if len(binary) > 1:
isBinary= True
binary = Star(companion, sphPointStar)
else:
isBinary=False
#binary = Star(sphPointStar, sphPointStar)
centerOfMassPosition = [0.0,0.0,0.0] | units.m
centerOfMassVelocity = [0.0,0.0,0.0] | units.m/units.s
#print [CalculateVectorSize(part.velocity).as_quantity_in(units.m / units.s) for part in sphGiant.gasParticles]
if isBinary:
if CalculateVectorSize(CalculateSeparation(sphGiant.core, companion)) < min(sphGiant.core.radius,
companion.radius):
print("merger between companion and the giant! step: ", step)
# break
parts = Particles()
parts.add_particle(sphGiant.core)
parts.add_particles(sphGiant.gasParticles)
parts.add_particle(companion)
print("com: ", parts.center_of_mass(), step)
print("com v: ", parts.center_of_mass_velocity(), i)
centerOfMassPosition = parts.center_of_mass()
centerOfMassVelocity = parts.center_of_mass_velocity()
comParticle = Particle()
comParticle.position = centerOfMassPosition
comParticle.velocity = centerOfMassVelocity
sphGiant.CountLeavingParticlesInsideRadius(com_position=centerOfMassPosition,
com_velocity=centerOfMassVelocity, companion=companion,
method=massLossMethod)
print("leaving particles: ", sphGiant.leavingParticles)
print("unbounded mass: ", sphGiant.totalUnboundedMass)
massLoss[i] = sphGiant.totalUnboundedMass.value_in(massLossUnits)
semmimajor = CalculateSemiMajor(CalculateVelocityDifference(companion, sphGiant.core), CalculateSeparation(companion, sphGiant.core),companion.mass + sphGiant.core.mass).as_quantity_in(units.AU)
CalculateEccentricity(companion, sphGiant.core)
#check if the companion is inside, take into account only the inner mass of the companion's orbit
sphGiant.CalculateInnerSPH(companion, com_position=centerOfMassPosition, com_velocity=centerOfMassVelocity)
#print "innerGasMass: ", sphGiant.innerGas.mass.value_in(units.MSun)
innerMass[i] = sphGiant.innerGas.mass.value_in(innerMassUnits)
newBinaryVelocityDifference = CalculateVelocityDifference(companion, sphGiant.innerGas)
newBinarySeparation = CalculateSeparation(companion, sphGiant.innerGas)
newBinaryMass = companion.mass + sphGiant.innerGas.mass
newBinarySpecificEnergy = CalculateSpecificEnergy(newBinaryVelocityDifference,newBinarySeparation,sphGiant.innerGas,companion)
semmimajor = CalculateSemiMajor(newBinaryVelocityDifference, newBinarySeparation, newBinaryMass).as_quantity_in(units.AU)
eccentricity = CalculateEccentricity(companion, sphGiant.innerGas)
eccentricities[i] = eccentricity
binaryDistances[i] = CalculateVectorSize(newBinarySeparation).value_in(binaryDistancesUnits)
sphGiant.CalculateEnergies(comV=centerOfMassVelocity)
uGiant[i] = sphGiant.thermalEnergy.value_in(uGiantUnits)
kGas[i] = sphGiant.gasKinetic.value_in(kGasUnits)
kCore[i] = sphGiant.coreKinetic.value_in(kCoreUnits)
kComp[i] = (0.5 * companion.mass * (CalculateVectorSize(companion.velocity))**2).value_in(kCompUnits)
# total energies
kTot = (sphGiant.kineticEnergy).value_in(kGasUnits) + kComp[i]
pGas[i] = sphGiant.gasPotential.value_in(pGasUnits)
pGiant[i] = sphGiant.potentialEnergy.value_in(pGiantUnits)
pCompCore[i] = CalculatePotentialEnergy(sphGiant.core, companion).value_in(pCompCoreUnits)
pCompGas = (sphGiant.potentialEnergyWithParticle(companion)).value_in(pGasUnits)
pTot[i] = pGiant[i] + pCompGas + pCompCore[i]
eTot[i] = kTot + pTot[i] + uGiant[i]
try:
separation = CalculateSeparation(companion, comParticle)
specificAngularCOM = CalculateSpecificMomentum(CalculateVelocityDifference(companion, comParticle),
separation)
angularOuterCOMx = companion.mass * specificAngularCOM[0]
angularOuterCOMy = companion.mass * specificAngularCOM[1]
angularOuterCOMz = companion.mass * specificAngularCOM[2]
companionAngularMomenta[i] = angularOuterCOMz.value_in(companionAngularMomentaUnits)
companionAngularMomentaTot = ((angularOuterCOMx ** 2 + angularOuterCOMy ** 2 +
angularOuterCOMz ** 2) ** 0.5).value_in(
companionAngularMomentaUnits)
gasAngularMomentaTot = sphGiant.GetAngularMomentumOfGas(centerOfMassPosition, centerOfMassVelocity)
gasAngularMomenta[i] = gasAngularMomentaTot[2].value_in(gasAngularMomentaUnits)
angularGiant = sphGiant.GetAngularMomentum(centerOfMassPosition, centerOfMassVelocity)
giantAngularMomenta[i] = angularGiant[2].value_in(giantAngularMomentaUnits)
angularCore = CalculateSpecificMomentum(CalculateVelocityDifference(sphGiant.core, comParticle),
CalculateSeparation(sphGiant.core, comParticle))
angularCoresx = sphGiant.core.mass * angularCore[0] + angularOuterCOMx
angularCoresy = sphGiant.core.mass * angularCore[1] + angularOuterCOMy
angularCoresz = sphGiant.core.mass * angularCore[2] + angularOuterCOMz
angularCores[i] = angularCoresz.value_in(angularCoresUnits)
#((angularCoresx ** 2 + angularCoresy ** 2 + angularCoresz ** 2) ** 0.5).value_in(angularCoresUnits)
angularTotx = angularGiant[0] + angularOuterCOMx
angularToty = angularGiant[1] + angularOuterCOMy
angularTotz = angularGiant[2] + angularOuterCOMz
totAngularMomenta[i] = angularTotz.value_in(totAngularMomentaUnits)
#((angularTotx ** 2 + angularToty ** 2 + angularTotz ** 2) ** 0.5).value_in(totAngularMomentaUnits)
except:
print("could not calculate angular momenta, ", sys.exc_info()[0])
semmimajors[i] = semmimajor.value_in(semmimajorsUnits)
#check if the binary is breaking up
            if newBinarySpecificEnergy > (0 | (units.m ** 2 / units.s ** 2)):
                print("binary is breaking up", newBinarySpecificEnergy, step)
Qxx_g,Qxy_g,Qxz_g,Qyx_g,Qyy_g,Qyz_g,Qzx_g,Qzy_g,Qzz_g = sphGiant.CalculateQuadropoleMoment()
Qxx_p,Qxy_p,Qxz_p,Qyx_p,Qyy_p,Qyz_p,Qzx_p,Qzy_p,Qzz_p = CalculateQuadropoleMomentOfParticle(companion) # add the companion to the calculation
print(Qxx_p, Qxx[i]+Qxx_p+Qxx_g)
Qxx[i] = (Qxx[i]+Qxx_p+Qxx_g)/(10**40)
            print(Qxx[i])
Qxy[i] += (Qxy_p + Qxy_g)/(10**40)
Qxz[i] += (Qxz_p + Qxz_g)/(10**40)
Qyx[i] += (Qyx_p + Qyx_g)/(10**40)
Qyy[i] += (Qyy_p + Qyy_g)/(10**40)
Qyz[i] += (Qyz_p + Qyz_g)/(10**40)
Qzx[i] += (Qzx_p + Qzx_g)/(10**40)
Qzy[i] += (Qzy_p + Qzy_g)/(10**40)
Qzz[i] += (Qzz_p + Qzz_g)/(10**40)
temperature_density_plot(sphGiant, step, outputDir, toPlot)
central_position = centerOfMassPosition
central_velocity = centerOfMassVelocity
sphGiant.gasParticles.position -= central_position
sphGiant.gasParticles.velocity -= central_velocity
sphGiant.core.position -= central_position
sphGiant.core.velocity -= central_velocity
companion.position -= central_position
companion.velocity -= central_velocity
innerAngularMomenta[i] = sphGiant.innerGas.angularMomentum[2].value_in(innerAngularMomentaUnits)
if toPlot:
PlotDensity(sphGiant.gasParticles,sphGiant.core,companion, step , outputDir, vmin, vmax, plotDust= plotDust,
dustRadius=dustRadius, timeStep=timeStep)
PlotDensity(sphGiant.gasParticles,sphGiant.core,companion, step, outputDir, vmin, vmax, plotDust= plotDust,
dustRadius=dustRadius, side_on=True, timeStep=timeStep)
PlotVelocity(sphGiant.gasParticles,sphGiant.core,companion, step, outputDir, vmin, vmax, timeStep=timeStep)
for f in [obj for obj in gc.get_objects() if isinstance(obj,h5py.File)]:
try:
f.close()
except:
pass
def AnalyzeTripleChunk(savingDir, gasFiles, dmFiles, outputDir, chunk, vmin, vmax, beginStep,
binaryDistances, tripleDistances, triple1Distances, triple2Distances,
aInners, aOuters, aOuters1, aOuters2,
eInners, eOuters, eOuters1, eOuters2, inclinations, innerMass, innerMass1, innerMass2, localDensity,
kInner, kOuter, kOuter1, kOuter2, pInner, pOuter, pOuter1, pOuter2,
kGas, uGas, pGas, kCore, pOuterCore, pCores, pPartGas, force, omegaInner, omegaGiant, omegaTot,
kTot, pTot, eTot,
angularInner, angularOuter,angularOuter1,angularOuter2, angularOuterCOM1, angularOuterCOM2, angularOuterCOM,
angularGasCOM, angularTot, localRadius=50.0|units.RSun,
toPlot = False, opposite= False, axesOriginInInnerBinaryCenterOfMass= False, timeStep=0.2):
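    # Like AnalyzeBinaryChunk, but for the triple configuration: the shared arrays are indexed
    # relative to beginStep (i = step - beginStep). Energies are stored in kg*km^2/s^2 and
    # angular momenta in units of (kg*km^2/s) / 1e4, as defined just below.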
energyUnits = units.kg*(units.km**2) / units.s**2
specificAngularMomentumUnits = (energyUnits * units.s / units.kg) / 10000
for i in [j - beginStep for j in chunk]:
print(time.ctime(), "step: ", i)
gas_particles_file = os.path.join(os.getcwd(), savingDir,gasFiles[i + beginStep])
dm_particles_file = os.path.join(os.getcwd(),savingDir, dmFiles[i + beginStep])
sphGiant = SphGiant(gas_particles_file, dm_particles_file, opposite= opposite)
print(sphGiant.core)
if i == 1:
print(sphGiant.gasParticles[0])
#print "neigbbours:", sphGiant.FindLowestNumberOfNeighbours()
#print "smallest cell radius: ", sphGiant.FindSmallestCell()
#binary = Particles(2,pickle.load(open(os.path.join(os.getcwd(),savingDir,"binary.p"),"rb")))
binary = LoadBinaries(dm_particles_file, opposite= opposite)
particle1 , particle2 = binary[0] , binary[1]
innerBinary = Star(particle1,particle2)
#change the position and velocity of center of mass to 0
centerOfMassPosition = (sphGiant.position * sphGiant.mass + innerBinary.position * innerBinary.mass) / (sphGiant.mass + innerBinary.mass)
centerOfMassVelocity = (sphGiant.v * sphGiant.mass + innerBinary.velocity * innerBinary.mass) / (sphGiant.mass + innerBinary.mass)
print("center of mass position: ", centerOfMassPosition)
print("center of mass velocity: ", centerOfMassVelocity)
comParticle = Particle()
comParticle.position = centerOfMassPosition
comParticle.velocity = centerOfMassVelocity
triple1 = Star(particle1, sphGiant)
triple2 = Star(particle2, sphGiant)
aInner = CalculateSemiMajor(innerBinary.velocityDifference,innerBinary.separation, innerBinary.mass)
eInner = CalculateEccentricity(particle1,particle2)
if CalculateVectorSize(innerBinary.separation) <= particle1.radius+ particle2.radius:
print("merger between the inner binary!" , innerBinary.separation.as_quantity_in(units.RSun) , i * timeStep)
if CalculateVectorSize(CalculateSeparation(sphGiant.core,particle1)) <= sphGiant.core.radius + particle1.radius:
print("merger between particle1 and the giant!" , i * timeStep)
#break
if CalculateVectorSize(CalculateSeparation(sphGiant.core, particle2)) <= sphGiant.core.radius+ particle2.radius:
print("merger between particle 2 and the giant!" , i * timeStep)
#break
        #check if the inner binary is breaking up
        if innerBinary.specificEnergy > (0 | (units.m ** 2 / units.s ** 2)):
print("binary is breaking up", innerBinary.specificEnergy , i * timeStep)
#check if the couple particle1 + giant are breaking up
        if triple1.specificEnergy > (0 | (units.m ** 2 / units.s ** 2)):
print("triple1 is breaking up", triple1.specificEnergy , i * timeStep)
#check if the couple particle2 + giant are also breaking up
            if triple2.specificEnergy > (0 | (units.m ** 2 / units.s ** 2)):
print("triple2 is also breaking up", triple2.specificEnergy , i * timeStep)
#break
#check if the couple particle2 + giant are breaking up
        if triple2.specificEnergy > (0 | (units.m ** 2 / units.s ** 2)):
print("triple2 is breaking up", triple2.specificEnergy, i * timeStep)
separationStep = 0
#all the three are connected
sphGiant.CountLeavingParticlesInsideRadius()
print("leaving particles: ", sphGiant.leavingParticles)
print("unbounded mass: ", sphGiant.totalUnboundedMass)
print(time.ctime(), "beginning innerGas calculations of step ", i)
sphGiant.CalculateInnerSPH(innerBinary, localRadius)
innerMass[i] = sphGiant.innerGas.mass.value_in(units.MSun)
tripleMass = innerBinary.mass + sphGiant.innerGas.mass
tripleVelocityDifference = CalculateVelocityDifference(innerBinary,sphGiant.innerGas)
tripleSeparation = CalculateSeparation(innerBinary,sphGiant.innerGas)
aOuter = CalculateSemiMajor(tripleVelocityDifference, tripleSeparation, tripleMass)
eOuter = CalculateEccentricity(innerBinary,sphGiant.innerGas)
inclination = CalculateInclination(tripleVelocityDifference, tripleSeparation, innerBinary.velocityDifference, innerBinary.separation)
binaryDistances[i] = CalculateVectorSize(innerBinary.separation).value_in(units.RSun)
tripleDistances[i] = CalculateVectorSize(tripleSeparation).value_in(units.RSun)
aInners[i] = aInner.value_in(units.AU)
aOuters[i] = aOuter.value_in(units.AU)
eInners[i] = eInner
eOuters[i] = eOuter
localDensity[i] = sphGiant.localDensity.value_in(units.MSun/units.RSun**3)
inclinations[i] = inclination
kInner[i]= innerBinary.kineticEnergy.value_in(energyUnits)
pInner[i] = innerBinary.potentialEnergy.value_in(energyUnits)
angularInner[i] = CalculateVectorSize(innerBinary.angularMomentum).value_in(specificAngularMomentumUnits * units.kg)
omegaInner[i] = innerBinary.omega.value_in(energyUnits)
giantForce = sphGiant.gravityWithParticle(particle1) + sphGiant.gravityWithParticle(particle2)
force[i] = CalculateVectorSize(giantForce).value_in(energyUnits/units.km)
#inner gas of the com of the inner binary
kOuter[i] = kInner[i] + sphGiant.innerGas.kineticEnergy.value_in(energyUnits)
pOuter[i] = -(constants.G*sphGiant.innerGas.mass*innerBinary.mass/
(tripleDistances[i] | units.RSun)).value_in(energyUnits)
angularOuter[i] = (innerBinary.mass * sphGiant.innerGas.mass *
(constants.G*aOuter/(innerBinary.mass+sphGiant.innerGas.mass))**0.5)\
.value_in(specificAngularMomentumUnits * units.kg)
#inner gas of particle 1
innerMass1[i] , aOuters1[i], eOuters1[i], triple1Distances[i] = CalculateBinaryParameters(particle1, sphGiant)
kOuter1[i] = (sphGiant.innerGas.kineticEnergy +
0.5*particle1.mass*(particle1.vx**2+particle1.vy**2+particle1.vz**2)).value_in(energyUnits)
pOuter1[i] = -(constants.G*sphGiant.innerGas.mass*particle1.mass/
(triple1Distances[i] | units.RSun)).value_in(energyUnits)
angularOuter1[i] = (particle1.mass*sphGiant.innerGas.mass*
(constants.G*(aOuters1[i] | units.AU)/(particle1.mass+sphGiant.innerGas.mass))**0.5).value_in(specificAngularMomentumUnits* units.kg)
#inner gas of particle2
innerMass2[i] , aOuters2[i], eOuters2[i], triple2Distances[i] = CalculateBinaryParameters(particle2, sphGiant)
        kOuter2[i] = (sphGiant.innerGas.kineticEnergy +
                      0.5*particle2.mass*(particle2.vx**2+particle2.vy**2+particle2.vz**2)).value_in(energyUnits)
pOuter2[i] = (-constants.G*sphGiant.innerGas.mass*particle2.mass/
(triple2Distances[i] | units.RSun)).value_in(energyUnits)
angularOuter2[i] = (particle2.mass*sphGiant.innerGas.mass*
(constants.G*(aOuters2[i] | units.AU)/(particle2.mass+sphGiant.innerGas.mass))**0.5).value_in(specificAngularMomentumUnits * units.kg)
#real energies
sphGiant.CalculateEnergies()
kGas[i] = sphGiant.gasKinetic.value_in(energyUnits)
uGas[i] = sphGiant.thermalEnergy.value_in(energyUnits)
pGas[i] = sphGiant.gasPotential.value_in(energyUnits)
kCore[i] = sphGiant.coreKinetic.value_in(energyUnits)
pOuterCore[i] = (CalculatePotentialEnergy(sphGiant.core,innerBinary)).value_in(energyUnits)
pPartsCore = CalculatePotentialEnergy(sphGiant.core, particle1) + \
CalculatePotentialEnergy(sphGiant.core, particle2)
pCores[i] = pPartsCore.value_in(energyUnits)
pPartGas[i] = (sphGiant.potentialEnergyWithParticle(particle1,sphGiant.core.radius/2.8) +
sphGiant.potentialEnergyWithParticle(particle2,sphGiant.core.radius/2.8)).value_in(energyUnits)
#total energies
kTot[i] = (sphGiant.kineticEnergy).value_in(energyUnits) + kInner[i]
pTot[i] = sphGiant.potentialEnergy.value_in(energyUnits) + pInner[i] + pPartGas[i] + pCores[i]
eTot[i] = kTot[i] + pTot[i] + uGas[i]
print("pTot: ", pTot[i], pGas[i],pOuterCore[i],pInner[i])
print("kTot: ",kTot[i])
print("eTot: ", eTot[i])
try:
separation1 = CalculateSeparation(particle1,comParticle)
specificAngularCOM1 = CalculateSpecificMomentum(CalculateVelocityDifference(particle1,comParticle), separation1)
angularOuterCOM1[i] = particle1.mass.value_in(units.kg)*CalculateVectorSize([specificAngularCOM1[0].value_in(specificAngularMomentumUnits),
specificAngularCOM1[1].value_in(specificAngularMomentumUnits),
specificAngularCOM1[2].value_in(specificAngularMomentumUnits)])
separation2 = CalculateSeparation(particle2, comParticle)
specificAngularCOM2 = CalculateSpecificMomentum(CalculateVelocityDifference(particle2, comParticle),separation2)
angularOuterCOM2[i] = particle2.mass.value_in(units.kg) * CalculateVectorSize([specificAngularCOM2[0].value_in(specificAngularMomentumUnits)
,specificAngularCOM2[1].value_in(specificAngularMomentumUnits)
,specificAngularCOM2[2].value_in(specificAngularMomentumUnits)
])
angularOuterCOMx = particle1.mass * specificAngularCOM1[0] + particle2.mass * specificAngularCOM2[0]
angularOuterCOMy = particle1.mass * specificAngularCOM1[1] + particle2.mass * specificAngularCOM2[1]
angularOuterCOMz = particle1.mass * specificAngularCOM1[2] + particle2.mass * specificAngularCOM2[2]
angularOuterCOM[i] = ((angularOuterCOMx**2+angularOuterCOMy**2+angularOuterCOMz**2)**0.5).value_in(specificAngularMomentumUnits * units.kg)
angularGasCOM[i] = CalculateVectorSize(sphGiant.GetAngularMomentumOfGas(centerOfMassPosition, centerOfMassVelocity)).value_in(specificAngularMomentumUnits * units.kg)
angularGiant = sphGiant.GetAngularMomentum(centerOfMassPosition,centerOfMassVelocity)
angularTotx = angularGiant[0] + angularOuterCOMx
angularToty = angularGiant[1] + angularOuterCOMy
angularTotz = angularGiant[2] + angularOuterCOMz
angularTot[i] = ((angularTotx**2 + angularToty**2 + angularTotz**2)**0.5).value_in(specificAngularMomentumUnits * units.kg)
omegaGiant[i] = sphGiant.omegaPotential.value_in(energyUnits)
comp = Particles(particles=[particle1,particle2])
comp.move_to_center()
comp.position -= comParticle.position
comp.velocity -=comParticle.velocity
omegaTot[i] = omegaInner[i] + omegaGiant[i] + CalculateOmega(comp).value_in(energyUnits)
print("omega tot: ", omegaTot[i])
except:
print("could not calculate angular momenta, ", sys.exc_info()[0])
print(time.ctime(), "temperature_density_plotting of step ", i)
temperature_density_plot(sphGiant, i + beginStep , outputDir, toPlot)
print(time.ctime(), "finished temperature plotting of step: ", i)
if toPlot:
central_position = sphGiant.gas.position
central_velocity = sphGiant.gas.velocity
'''
if axesOriginInInnerBinaryCenterOfMass:
central_position = innerBinary.position
central_velocity = innerBinary.velocity
'''
sphGiant.gasParticles.position -= central_position
sphGiant.gasParticles.velocity -= central_velocity
sphGiant.core.position -= central_position
sphGiant.core.velocity -= central_velocity
binary[0].position -= central_position
binary[0].velocity -= central_velocity
binary[1].position -= central_position
binary[1].velocity -= central_velocity
if axesOriginInInnerBinaryCenterOfMass:
PlotDensity(sphGiant.gasParticles,sphGiant.core,binary,i + beginStep, outputDir, vmin=5e29, vmax= 1e35, width= 30.0 * 3.0 | units.RSun, timeStep=timeStep)
PlotDensity(sphGiant.gasParticles,sphGiant.core,binary,i + beginStep, outputDir, vmin=5e29, vmax= 1e35, width= 30.0 * 3.0 | units.RSun, side_on=True, timeStep=timeStep)
else:
PlotDensity(sphGiant.gasParticles,sphGiant.core,binary,i + beginStep, outputDir, vmin, vmax, width= 4.0 | units.AU, timeStep=timeStep)
PlotDensity(sphGiant.gasParticles,sphGiant.core,binary,i + beginStep, outputDir, vmin, vmax, width= 4.0 | units.AU, side_on=True, timeStep=timeStep)
PlotVelocity(sphGiant.gasParticles,sphGiant.core,binary,i + beginStep, outputDir, vmin, vmax)
#close opened handles
for f in [obj for obj in gc.get_objects() if isinstance(obj,h5py.File)]:
try:
f.close()
except:
pass
def AnalyzeBinary(beginStep, lastStep, dmFiles, gasFiles, savingDir, outputDir, vmin, vmax, toPlot = False,cpus=10,
skip=1,plotDust=False, dustRadius=700.0|units.RSun, massLossMethod="estimated", timeStep=0.2):
if lastStep == 0 : # no boundary on last step
lastStep = len(gasFiles)
else:
lastStep=min(lastStep, len(dmFiles))
print(lastStep)
workingRange = range(beginStep, lastStep,skip)
energyUnits = units.kg*(units.km**2)/(units.s**2)
angularMomentaUnits = energyUnits * units.s * 10000
binaryDistances = MultiProcessArrayWithUnits(len(workingRange),units.RSun)
semmimajors = MultiProcessArrayWithUnits(len(workingRange),units.AU)
eccentricities = MultiProcessArrayWithUnits(len(workingRange),None)
innerMass = MultiProcessArrayWithUnits(len(workingRange),units.MSun)
pGas = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
pGiant = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
pCompCore = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
pTot = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
kGas = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
uGiant = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
kCore = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
kComp = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
eTot = MultiProcessArrayWithUnits(len(workingRange),energyUnits)
innerAngularMomenta = MultiProcessArrayWithUnits(len(workingRange),angularMomentaUnits)
companionAngularMomenta = MultiProcessArrayWithUnits(len(workingRange),angularMomentaUnits)
giantAngularMomenta = MultiProcessArrayWithUnits(len(workingRange),angularMomentaUnits)
gasAngularMomenta = MultiProcessArrayWithUnits(len(workingRange),angularMomentaUnits)
coresAngularMomenta = MultiProcessArrayWithUnits(len(workingRange),angularMomentaUnits)
totAngularMomenta = MultiProcessArrayWithUnits(len(workingRange),angularMomentaUnits)
massLoss = MultiProcessArrayWithUnits(len(workingRange), units.MSun)
    Qxx = multiprocessing.Array('f', [0.0 for i in workingRange])
Qxy = multiprocessing.Array('f', [0.0 for i in workingRange])
Qxz = multiprocessing.Array('f', [0.0 for i in workingRange])
Qyx = multiprocessing.Array('f', [0.0 for i in workingRange])
Qyy = multiprocessing.Array('f', [0.0 for i in workingRange])
Qyz = multiprocessing.Array('f', [0.0 for i in workingRange])
Qzx = multiprocessing.Array('f', [0.0 for i in workingRange])
Qzy = multiprocessing.Array('f', [0.0 for i in workingRange])
Qzz = multiprocessing.Array('f', [0.0 for i in workingRange])
#chunkSize = (lastStep - beginStep) / 8
chunkSize = int(math.floor(len(workingRange) / cpus))
if chunkSize == 0:
if lastStep - beginStep == 0:
return
else:
chunkSize = 1
leftovers = len(workingRange) - cpus * chunkSize
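    # Split workingRange across the processes: the first `leftovers` chunks get chunkSize + 1
    # steps each and the remaining chunks get chunkSize, so every step is covered exactly once.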
chunks = []
chunks += [workingRange[i:i+min(chunkSize+1,len(workingRange)-i)] for i in
xrange(0,leftovers*(chunkSize+1),chunkSize+1)]
chunks += [workingRange[i:i+min(chunkSize,len(workingRange)-i)] for i in
xrange(leftovers*(chunkSize+1),len(workingRange),chunkSize)]
processes = []
print(chunks)
i=0
for chunk in chunks:
processes.append(multiprocessing.Process(target= AnalyzeBinaryChunk,args=(savingDir,gasFiles,dmFiles,outputDir,
chunk, vmin, vmax, i,
binaryDistances.array, binaryDistances.units,
semmimajors.array, semmimajors.units,
eccentricities.array,
innerMass.array, innerMass.units,
pGas.array, pGas.units,
pGiant.array, pGiant.units,
pCompCore.array, pCompCore.units,
pTot.array, pTot.units,
kGas.array, kGas.units,
uGiant.array, uGiant.units,
kCore.array, kCore.units,
kComp.array, kComp.units,
eTot.array, eTot.units,
innerAngularMomenta.array, innerAngularMomenta.units,
companionAngularMomenta.array, companionAngularMomenta.units,
giantAngularMomenta.array, giantAngularMomenta.units,
gasAngularMomenta.array, gasAngularMomenta.units,
coresAngularMomenta.array, coresAngularMomenta.units,
totAngularMomenta.array, totAngularMomenta.units,
massLoss.array,massLoss.units,
Qxx,Qxy,Qxz,Qyx,Qyy,Qyz,Qzx,Qzy,Qzz,
toPlot,
plotDust,dustRadius, massLossMethod,
timeStep,)))
i += len(chunk)
#pool.map()
for p in processes:
p.start()
for p in processes:
p.join()
binaryDistances.plot("InnerBinaryDistances", outputDir + "/graphs",timeStep*skip, 1.0*beginStep/skip,False)
semmimajors.plot("aInners", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
innerMass.plot("InnerMass", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
eccentricities.plot("eInners", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
innerAngularMomenta.plot("innerAngularMomenta", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
companionAngularMomenta.plot("companionAngularMomenta", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
giantAngularMomenta.plot("giantAngularMomenta", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
gasAngularMomenta.plot("gasAngularMomenta", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
coresAngularMomenta.plot("coresAngularMomenta", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
totAngularMomenta.plot("totAngularMomenta", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
pGas.plot("pGas", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
pGiant.plot("pGiant", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
pCompCore.plot("pCompCore", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
pTot.plot("pTot", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
kGas.plot("kGas", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
uGiant.plot("uGiant", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
kCore.plot("kCore", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
kComp.plot("kComp", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
eTot.plot("eTot", outputDir + "/graphs",timeStep*skip,1.0*beginStep/skip,False)
massLoss.plot("mass loss", outputDir + "/graphs", timeStep*skip,1.0*beginStep/skip,False)
#PlotQuadropole(Qxx,Qxy,Qxz,Qyx,Qyy,Qyz,Qzx,Qzy,Qzz,outputDir+"/graphs",timeStep*skip,1.0*beginStep/skip)
def AnalyzeTriple(beginStep, lastStep, dmFiles, gasFiles, savingDir, outputDir, vmin, vmax, localRadius=50.0 | units.RSun
,toPlot = False, opposite= False, axesOriginInInnerBinaryCenterOfMass= False, timeStep=0.2):
separationStep = multiprocessing.Value('i')
if lastStep == 0 : # no boundary on last step
lastStep = len(dmFiles)
else:
lastStep=min(lastStep, len(dmFiles))
print(lastStep)
binaryDistances = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
tripleDistances = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
triple1Distances = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
triple2Distances = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
aInners = multiprocessing.Array('f', [0.0 for i in range(beginStep, lastStep)])
aOuters = multiprocessing.Array('f', [0.0 for i in range(beginStep, lastStep)])
aOuters1 = multiprocessing.Array('f', [0.0 for i in range(beginStep, lastStep)]) # for the couple particle1 + giant
aOuters2 = multiprocessing.Array('f', [0.0 for i in range(beginStep, lastStep)]) # for the couple particle2 + giant
eInners = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
eOuters = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
eOuters1 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)]) # for the couple particle1 + giant
eOuters2 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)]) # for the couple particle2 + giant
    inclinations = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
innerMass = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
innerMass1 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
innerMass2 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
localDensity = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
kInner = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
kOuter = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
kOuter1 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
kOuter2 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pInner = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pOuter = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pOuter1 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pOuter2 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
kGas = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
uGas = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pGas = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
kCore = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pOuterCore = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pCores = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pPartGas = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
force = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
omegaInner = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
omegaGiant = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
omegaTot = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
kTot = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
pTot = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
eTot = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularInner = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularOuter = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularOuter1 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularOuter2 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularOuterCOM1 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularOuterCOM2 = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularOuterCOM = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularGasCOM = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
angularTot = multiprocessing.Array('f', [-1.0 for i in range(beginStep, lastStep)])
#angularInner, angularOuter,angularOuter1,angularOuter2 angularOuterCOM1, angularOuterCOM2, angularOuterCOM, angularGasCOM, angularTot
#kInner, kOuter, kOuter1, kOuter2, pInner, pOuter, pOuter1, pOuter2, uInner, uOuter, uOuter1, uOuter2, kGas, uGas, pGas,
# kCore, pOuterCore, pCores, pPartGas, force, omegaInner, omegaGiant, omegaTot, kTot, pTot, uTot, eTot
    cpus = multiprocessing.cpu_count() - 6
    chunkSize = (lastStep - beginStep) // cpus
    print("using ", cpus, " cpus")
if chunkSize == 0:
if lastStep - beginStep == 0:
return
else:
chunkSize = 1
chunks = [xrange(i,i+chunkSize) for i in xrange(beginStep,lastStep,chunkSize)]
if len(chunks) > 1:
lastChunkBegin = chunks[-2][-1] + 1
else:
lastChunkBegin = beginStep
chunks[-1] = xrange(lastChunkBegin, lastStep)
processes = []
print(chunks)
for chunk in chunks:
processes.append(multiprocessing.Process(target= AnalyzeTripleChunk,args=(savingDir, gasFiles, dmFiles, outputDir, chunk, vmin, vmax, beginStep,
binaryDistances, tripleDistances, triple1Distances, triple2Distances,
aInners, aOuters, aOuters1, aOuters2,
eInners, eOuters, eOuters1, eOuters2, inclinations, innerMass, innerMass1, innerMass2,localDensity,
kInner, kOuter, kOuter1, kOuter2,
pInner, pOuter, pOuter1, pOuter2,
kGas, uGas, pGas, kCore, pOuterCore,
pCores, pPartGas, force,
omegaInner, omegaGiant, omegaTot,
kTot, pTot, eTot,
angularInner, angularOuter,
angularOuter1, angularOuter2,
angularOuterCOM1, angularOuterCOM2,
angularOuterCOM, angularGasCOM, angularTot,
localRadius, toPlot, opposite,
axesOriginInInnerBinaryCenterOfMass,
timeStep,)))
for p in processes:
p.start()
for p in processes:
p.join()
newBinaryDistances = AdaptingVectorQuantity()
newTripleDistances = AdaptingVectorQuantity()
newTriple1Distances = AdaptingVectorQuantity()
newTriple2Distances = AdaptingVectorQuantity()
newAInners = AdaptingVectorQuantity()
newAOuters = AdaptingVectorQuantity()
newAOuters1 = AdaptingVectorQuantity()
newAOuters2 = AdaptingVectorQuantity()
newInnerMass = AdaptingVectorQuantity()
newInnerMass1 = AdaptingVectorQuantity()
newInnerMass2 = AdaptingVectorQuantity()
newLocalDensity = AdaptingVectorQuantity()
for j in xrange(len(binaryDistances)-1):
newBinaryDistances.append(float(binaryDistances[j]) | units.RSun)
newTripleDistances.append(float(tripleDistances[j]) | units.RSun)
newTriple1Distances.append(float(triple1Distances[j]) | units.RSun)
newTriple2Distances.append(float(triple2Distances[j]) | units.RSun)
newAInners.append(float(aInners[j]) | units.AU)
newAOuters.append(float(aOuters[j]) | units.AU)
newAOuters1.append(float(aOuters1[j]) | units.AU)
newAOuters2.append(float(aOuters2[j]) | units.AU)
newInnerMass.append(float(innerMass[j]) | units.MSun)
newInnerMass1.append(float(innerMass1[j]) | units.MSun)
newInnerMass2.append(float(innerMass2[j]) | units.MSun)
newLocalDensity.append(float(localDensity[j]) | units.MSun / units.RSun**3)
separationStep = int(separationStep.value)
PlotBinaryDistance([(newBinaryDistances, "InnerBinaryDistances"), (newTripleDistances, "tripleDistances"), (newTriple1Distances, "triple1Distances"),
(newTriple2Distances, "triple2Distances")], outputDir + "/graphs", beginStep,timeStep,toPlot)
PlotAdaptiveQuantities([(newAInners,"aInners"),(newAOuters, "aOuters")], outputDir+"/graphs",beginStep,timeStep,toPlot)
PlotAdaptiveQuantities([(newAOuters1, "aOuters1"), (newAOuters2, "aOuters2")], outputDir+ "/graphs", separationStep,timeStep,toPlot)
PlotEccentricity([(eInners, "eInners"), (eOuters, "eOuters")], outputDir + "/graphs", beginStep, timeStep, toPlot)
PlotEccentricity([(eOuters1, "eOuters1"), (eOuters2, "eOuters2")],outputDir + "/graphs", separationStep,timeStep,toPlot)
Plot1Axe(inclinations,"inclinations", outputDir+"/graphs", beginStep=beginStep, toPlot=toPlot)
PlotAdaptiveQuantities([(innerMass, "InnerMass"), (innerMass1, "InnerMass1"), (innerMass2, "InnerMass2"),
(localDensity, "LocalDensity"),(kInner,"kInner"), (kOuter,"kOuter"), (kOuter1,"kOuter1"),
(kOuter2,"kOuter2"),(pInner,"pInner"), (pOuter,"pOuter"), (pOuter1,"pOuter1"),
(pOuter2,"pOuter2"),(kGas,"kGas"), (uGas,"uGas"), (pGas,"pGas"), (kCore,"kCore"),
(pOuterCore,"pOuterCore"),(pCores,"pCores"), (pPartGas,"pPartGas"), (force,"force"),
(omegaInner,"omegaInner"), (omegaGiant,"omegaGiant"), (omegaTot,"omegaTot"),
(kTot,"kTot"), (pTot,"pTot"), (eTot,"eTot"),
(angularInner,"angularInner"), (angularOuter,"angularOuter"), (angularOuter1,"angularOuter1"),
(angularOuter2,"angularOuter2"), (angularOuterCOM1,"angularOuterCOM1"),
(angularOuterCOM2,"angularOuterCOM2"), (angularOuterCOM,"angularOuterCOM"), (angularGasCOM,"angularGasCOM"),
(angularTot,"angularTot")], outputDir + "/graphs", beginStep,timeStep, toPlot)
def InitParser():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--beginStep', type=int, help='first step', default=0)
parser.add_argument('--lastStep', type=int, help='last step', default=0)
parser.add_argument('--timeStep', type=float, help='time between files in days', default=0.2)
parser.add_argument('--skip', type=int, help='number of steps to skip', default=1)
parser.add_argument('--source_dir', type=str, help='path to amuse files directory', default= sys.argv[0])
parser.add_argument('--savingDir', type=str, help='path to output directory', default= "evolution")
parser.add_argument('--vmin', type=float, help='minimum density plotting', default=1e16)
parser.add_argument('--vmax', type=float, help='maximum density plotting', default=1e34)
parser.add_argument('--plot', type=lambda x: (str(x).lower() in ['true', '1', 'yes']), help='do you want to plot profiles?', default=False)
parser.add_argument('--axesOriginInInnerBinaryCenterOfMass', type=lambda x: (str(x).lower() in ['true', '1', 'yes']), help='do you want to plot the inner binary at the origin?', default=False)
parser.add_argument('--opposite', type=lambda x: (str(x).lower() in ['true', '1', 'yes']), help='do you want the main star to be a part of the inner binary?', default=False)
    parser.add_argument('--localRadius', type=float, help='local environment radius', default=50.0)
parser.add_argument('--cpus', type=int, help='number of cpus', default=10)
parser.add_argument('--massLossMethod', type=str, help='estimated or direct', default= "estimated")
return parser
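# Illustrative usage (added, not part of the original script): a hypothetical
# command line for this parser; the script name and paths are invented.
#   python analyze_runs.py --source_dir runs/run_003 --savingDir evolution \
#       --beginStep 0 --lastStep 100 --timeStep 0.2 --plot true --cpus 10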
def GetArgs(args):
if len(args) > 1:
directory=args[1]
else:
directory = args[0]
if len(args) > 2:
savingDir = directory + "/" + args[2]
if args[2] == "snapshots":
toCompare = False
else:
toCompare = True
else:
savingDir = directory + "/evolution"
toCompare = True
if len(args) > 3:
beginStep = int(args[3])
else:
beginStep = 0
if len(args) > 4:
lastStep = int(args[4])
else:
lastStep = 0
if len(args) > 5:
vmin= float(args[5])
else:
vmin = 1e16
if len(args) > 6:
vmax = float(args[6])
else:
vmax= 1e34
if len(args) >7:
plot = bool(int(args[7]))
else:
plot = False
if len(args) >8:
axesOriginInInnerBinaryCenterOfMass = bool(int(args[8]))
else:
axesOriginInInnerBinaryCenterOfMass = False
if len(args) >9:
opposite = bool(int(args[9]))
else:
opposite = False
if len(args) > 10:
timeStep=float(args[10])
else:
timeStep = 0.2
if len(args) > 11:
localRadius = float(args[11]) | units.RSun
else:
localRadius = 50.0 | units.RSun
outputDir = savingDir + "/pics"
return savingDir, toCompare, beginStep, lastStep, vmin, vmax, outputDir, plot, axesOriginInInnerBinaryCenterOfMass, opposite, timeStep, localRadius
def InitializeSnapshots(savingDir, toCompare=False, firstFile=0):
'''
    Takes the snapshots directory of a past run.
    Returns: sorted lists of dm snapshot files and gas snapshot files
'''
snapshots = os.listdir(os.path.join(os.getcwd(),savingDir))
numberOfSnapshots = len(snapshots) / 2
dmFiles = []
gasFiles = []
for snapshotFile in snapshots:
if 'dm' in snapshotFile: #if the word dm is in the filename
dmFiles.append(snapshotFile)
if 'gas' in snapshotFile:
gasFiles.append(snapshotFile)
if toCompare:
dmFiles.sort(cmp=compare)
gasFiles.sort(cmp= compare)
else:
dmFiles.sort()
gasFiles.sort()
numberOfCompanion = 0
if len(dmFiles) > 0:
try:
numberOfCompanion = len(read_set_from_file(os.path.join(os.getcwd(), savingDir,dmFiles[firstFile]), format='amuse'))
except:
numberOfCompanion = len(read_set_from_file(os.path.join(os.getcwd(), savingDir,dmFiles[0]), format='amuse'))
return gasFiles, dmFiles, numberOfCompanion
def compare(st1, st2):
num1 = int(st1.split("_")[1].split(".")[0])
num2 = int(st2.split("_")[1].split(".")[0])
if num1 < num2:
return -1
return 1
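# Illustrative sketch (added, not part of the original script): a key-based
# equivalent of the cmp-style comparator above, sorting snapshot files by the
# integer embedded in names such as "gas_12.amuse" (assumed naming scheme).
def _snapshot_index(filename):
    # "gas_12.amuse" -> 12
    return int(filename.split("_")[1].split(".")[0])
# e.g. sorted(["gas_10.amuse", "gas_2.amuse"], key=_snapshot_index)
#      == ["gas_2.amuse", "gas_10.amuse"], which a plain string sort gets wrong.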
def main(args= ["../../BIGDATA/code/amuse-10.0/runs200000/run_003","evolution",0,1e16,1e34, 1]):
parser=InitParser()
args=parser.parse_args()
savingDir = os.path.join(args.source_dir, args.savingDir)
outputDir = os.path.join(savingDir,"pics")
toCompare = (args.savingDir != "snapshots")
print("plotting to " + outputDir + " plot- " + str(args.plot) + " from " + args.savingDir +" begin step = " , args.beginStep , \
" vmin, vmax = " , args.vmin, args.vmax, "special comparing = ", toCompare, "axes at the origin? ", \
          args.axesOriginInInnerBinaryCenterOfMass, "opposite? ", args.opposite, "timeStep= ", args.timeStep,
"localRadius= ",args.localRadius)
'''savingDir, toCompare, beginStep, lastStep, vmin, vmax, outputDir, plot, axesOriginInInnerBinaryCenterOfMass, \
opposite, timeStep, localRadius = GetArgs(args)
print "plotting to " + outputDir + " plot- " + str(plot) + " from " + savingDir +" begin step = " , beginStep , \
" vmin, vmax = " , vmin, vmax, "special comparing = ", toCompare, "axes at the origin? ", \
axesOriginInInnerBinaryCenterOfMass, "opossite? ", opposite, "timeStep= ", timeStep, "localRadius= ",localRadius
'''
try:
os.makedirs(outputDir)
except(OSError):
pass
try:
os.makedirs(outputDir + "/side_on")
except(OSError):
pass
try:
os.makedirs(outputDir + "/velocity")
except(OSError):
pass
try:
os.makedirs(outputDir + "/graphs")
except (OSError):
pass
try:
os.makedirs(outputDir + "/radial_profile")
except(OSError):
pass
gasFiles, dmFiles, numberOfCompanion = InitializeSnapshots(savingDir, toCompare,args.beginStep)
if numberOfCompanion <= 2: #binary
print("analyzing binary")
AnalyzeBinary(beginStep=args.beginStep,lastStep=args.lastStep, dmFiles=dmFiles, gasFiles=gasFiles,
savingDir=savingDir, outputDir=outputDir, vmin=args.vmin, vmax=args.vmax, toPlot=args.plot,
plotDust=False, timeStep=args.timeStep, skip=args.skip, cpus= args.cpus,
massLossMethod=args.massLossMethod)
elif numberOfCompanion ==3: #triple
AnalyzeTriple(beginStep=args.beginStep, lastStep=args.lastStep, dmFiles=dmFiles, gasFiles=gasFiles,
savingDir=savingDir, outputDir=outputDir, vmin=args.vmin, vmax=args.vmax, localRadius=args.localRadius,
toPlot=args.plot, opposite=args.opposite,
axesOriginInInnerBinaryCenterOfMass=args.axesOriginInInnerBinaryCenterOfMass, timeStep=args.timeStep)
if __name__ == "__main__":
for arg in sys.argv:
print(arg)
print(len(sys.argv))
main(sys.argv)
| gpl-2.0 |
JVillella/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading the iris data in h5 format as a simple
  # demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
saullocastro/pyNastran | pyNastran/op2/tables/oes_stressStrain/real/oes_bars100.py | 1 | 12846 | from __future__ import (nested_scopes, generators, division, absolute_import,
print_function, unicode_literals)
from six import iteritems
from itertools import count
import numpy as np
from numpy import zeros, searchsorted, ravel
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import StressObject, StrainObject, OES_Object
from pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header
try:
import pandas as pd
except ImportError:
pass
class RealBar10NodesArray(OES_Object):
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.ielement = 0
self.nelements = 0 # result specific
self.nnodes = None
if is_sort1:
if dt is not None:
#self.add = self.add_sort1
self.add_new_eid = self.add_new_eid_sort1
#self.addNewNode = self.addNewNodeSort1
else:
raise NotImplementedError('SORT2')
#assert dt is not None
#self.add = self.add_sort2
#self.add_new_eid = self.add_new_eid_sort2
#self.addNewNode = self.addNewNodeSort2
def is_real(self):
return True
def is_complex(self):
return False
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def _get_msgs(self):
raise NotImplementedError('%s needs to implement _get_msgs' % self.__class__.__name__)
def get_headers(self):
raise NotImplementedError('%s needs to implement get_headers' % self.__class__.__name__)
def build(self):
#print("self.ielement =", self.ielement)
# print('RealBar10NodesArray isubcase=%s ntimes=%s nelements=%s ntotal=%s' % (
# self.isubcase, self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
if self.element_type == 100:
nnodes_per_element = 1
else:
raise NotImplementedError(self.element_type)
self.nnodes = nnodes_per_element
self.nelements //= self.ntimes
#self.ntotal = self.nelements #* 2 # for A/B
#self.nelements //= nnodes_per_element
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, int):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.ntotal, dtype='int32')
#[sd, sxc, sxd, sxe, sxf, axial, smax, smin, MS]
self.data = zeros((self.ntimes, self.ntotal, 9), dtype='float32')
def build_dataframe(self):
headers = self.get_headers()
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=self.element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['ElementID', 'Item']
else:
self.data_frame = pd.Panel(self.data, major_axis=self.element, minor_axis=headers).to_frame()
self.data_frame.columns.names = ['Static']
self.data_frame.index.names = ['ElementID', 'Item']
def __eq__(self, table):
assert self.is_sort1() == table.is_sort1()
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1():
for itime in range(ntimes):
                    for ieid, eid in enumerate(self.element):
                        t1 = self.data[itime, ieid, :]
                        t2 = table.data[itime, ieid, :]
                        # columns are [sd, sxc, sxd, sxe, sxf, axial, smax, smin, MS]
                        (sd1, sxc1, sxd1, sxe1, sxf1, axial1, smax1, smin1, ms1) = t1
                        (sd2, sxc2, sxd2, sxe2, sxf2, axial2, smax2, smin2, ms2) = t2
                        if not np.allclose(t1, t2):
                        #if not np.array_equal(t1, t2):
                            msg += '%s\n  (%s, %s, %s, %s, %s, %s, %s, %s, %s)\n  (%s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
                                eid,
                                sd1, sxc1, sxd1, sxe1, sxf1, axial1, smax1, smin1, ms1,
                                sd2, sxc2, sxd2, sxe2, sxf2, axial2, smax2, smin2, ms2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2())
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_new_eid(self, eType, dt, eid, sd, sxc, sxd, sxe, sxf, axial, smax, smin, MS):
self.add_new_eid_sort1(eType, dt, eid,
sd, sxc, sxd, sxe, sxf, axial, smax, smin, MS)
def add_new_eid_sort1(self, eType, dt, eid,
sd, sxc, sxd, sxe, sxf, axial, smax, smin, MS):
self._times[self.itime] = dt
# print('isubcase=%s itotal=%s ieid=%s eid=%s' % (self.isubcase, self.itotal, self.ielement, eid))
self.element[self.itotal] = eid
self.data[self.itime, self.itotal, :] = [sd, sxc, sxd, sxe, sxf, axial, smax, smin, MS]
self.itotal += 1
self.ielement += 1
def get_stats(self):
if not self.is_built:
return ['<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
nnodes = self.nnodes
ntotal = self.ntotal
#nlayers = 2
nelements = self.ntotal // self.nnodes # // 2
msg = []
if self.nonlinear_factor is not None: # transient
msg.append(' type=%s ntimes=%i nelements=%i nnodes_per_element=%i ntotal=%i\n'
% (self.__class__.__name__, ntimes, nelements, nnodes, ntotal))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i nnodes_per_element=%i ntotal=%i\n'
% (self.__class__.__name__, nelements, nnodes, ntotal))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
assert n == self.data.shape[2], 'nheaders=%s shape=%s' % (n, str(self.data.shape))
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n ' % self.element_name)
msg += self.get_data_code()
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element_node[:, 0]) #[0]
return itot
#def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element_node[:, 0] == eid) for eid in eids])
##ind = searchsorted(eids, self.element)
##ind = ind.reshape(ind.size)
##ind.sort()
#return ind
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg = self._get_msgs()
#print('CBAR ntimes=%s ntotal=%s' % (ntimes, ntotal))
if self.is_sort1():
page_num = self._write_sort1_as_sort1(f, header, page_stamp, msg, page_num)
else:
raise RuntimeError()
return page_num
def _write_sort1_as_sort1(self, f06_file, header, page_stamp, msg, page_num):
(ntimes, ntotal) = self.data.shape[:2]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
sd = self.data[itime, :, 0]
sxc = self.data[itime, :, 1]
sxd = self.data[itime, :, 2]
sxe = self.data[itime, :, 3]
sxf = self.data[itime, :, 4]
axial = self.data[itime, :, 5]
smax = self.data[itime, :, 6]
smin = self.data[itime, :, 7]
MS = self.data[itime, :, 8]
for (i, eid, sdi, sxci, sxdi, sxei, sxfi, axiali, smaxi, smini, MSi) in zip(
count(), eids, sd, sxc, sxd, sxe, sxf, axial, smax, smin, MS):
vals = [sdi, sxci, sxdi, sxei, sxfi, axiali, smaxi, smini, MSi]
vals2 = write_floats_13e(vals)
[sdi, sxci, sxdi, sxei, sxfi, axiali, smaxi, smini, MSi] = vals2
f06_file.write('0%8i %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s %s\n'
% (eid, sdi, sxci, sxdi, sxei, sxfi, axiali, smaxi, smini, MSi))
f06_file.write(page_stamp % page_num)
page_num += 1
if self.nonlinear_factor is None:
page_num -= 1
return page_num
class RealBar10NodesStressArray(RealBar10NodesArray, StressObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealBar10NodesArray.__init__(self, data_code, is_sort1, isubcase, dt)
StressObject.__init__(self, data_code, isubcase)
def get_headers(self):
#if self.is_fiber_distance():
#fiber_dist = 'fiber_distance'
#else:
#fiber_dist = 'fiber_curvature'
#if self.is_von_mises():
#ovm = 'von_mises'
#else:
#ovm = 'max_shear'
headers = ['sd', 'sxc', 'sxd', 'sxe', 'sxf', 'axial', 'smax', 'smin', 'MS']
return headers
def _get_msgs(self):
msg = [
' S T R E S S D I S T R I B U T I O N I N B A R E L E M E N T S ( C B A R )\n'
'0 ELEMENT STATION SXC SXD SXE SXF AXIAL S-MAX S-MIN M.S.-T\n'
' ID. (PCT) M.S.-C\n'
#' 1 0.000 4.919032E+05 -4.348710E+05 -4.348710E+05 4.919032E+05 0.0 4.919032E+05 -4.348710E+05 \n'
]
return msg
class RealBar10NodesStrainArray(RealBar10NodesArray, StrainObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealBar10NodesArray.__init__(self, data_code, is_sort1, isubcase, dt)
StrainObject.__init__(self, data_code, isubcase)
def get_headers(self):
#if self.is_fiber_distance():
#fiber_dist = 'fiber_distance'
#else:
#fiber_dist = 'fiber_curvature'
#if self.is_von_mises():
#ovm = 'von_mises'
#else:
#ovm = 'max_shear'
headers = ['sd', 'sxc', 'sxd', 'sxe', 'sxf', 'axial', 'smax', 'smin', 'MS']
return headers
def _get_msgs(self):
msg = [
' S T R A I N D I S T R I B U T I O N I N B A R E L E M E N T S ( C B A R )\n'
'0 ELEMENT STATION SXC SXD SXE SXF AXIAL S-MAX S-MIN M.S.-T\n'
' ID. (PCT) M.S.-C\n'
#' 1 0.000 4.919032E+05 -4.348710E+05 -4.348710E+05 4.919032E+05 0.0 4.919032E+05 -4.348710E+05 \n'
]
return msg
| lgpl-3.0 |
bnaul/scikit-learn | examples/text/plot_hashing_vs_dict_vectorizer.py | 23 | 3253 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
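# Illustrative note (added): token_freqs("To be or not to be") yields the
# counts {'to': 2, 'be': 2, 'or': 1, 'not': 1} (as a defaultdict), since
# tokens() lower-cases every regex match before counting.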
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
# categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data, _ = fetch_20newsgroups(subset='train', categories=categories,
return_X_y=True)
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
etamponi/mrca | mrca/evaluation/prepare_figures.py | 1 | 13226 | from collections import defaultdict
import matplotlib
import numpy
import sys
from mrca.evaluation.collect_results import prepare_data_per_profile, prepare_data_per_clustering, prepare_raw_data
from mrca.evaluation import *
__author__ = 'Emanuele Tamponi'
NL = "\n"
def main():
configure_matplotlib()
from matplotlib import pyplot
raw_data = prepare_raw_data()
data_per_profile = prepare_data_per_profile()
data_per_cluster = prepare_data_per_clustering()
for classifier in CLASSIFIER_NAMES:
prepare_performance_table(data_per_profile, classifier)
prepare_mean_correlation_table(data_per_profile, classifier)
for probe, clusterer, classifier in product(PROBE_NAMES, CLUSTER_NAMES, CLASSIFIER_NAMES):
best_profile_conf, count = get_best_profile_confs(data_per_profile, 1, probe, clusterer, classifier)[0]
prepare_profile_table(best_profile_conf, count, data_per_cluster, clusterer, classifier)
for clusterer, classifier in product(CLUSTER_NAMES, CLASSIFIER_NAMES):
prepare_best_result_table(data_per_cluster, clusterer, classifier)
prepare_best_result_plots(pyplot, raw_data, data_per_cluster, clusterer, classifier)
def prepare_performance_table(data_per_profile, classifier):
table_name = "table_performance_{}".format(classifier)
with open("figures/{}.tex".format(table_name), "w") as f:
f.writelines((r"\begin{table}\centering", NL))
f.writelines((r"\renewcommand{\arraystretch}{1.2}", NL))
f.writelines((r"\renewcommand{\tabcolsep}{4pt}", NL))
f.writelines((r"\small", NL))
f.writelines((
r"\begin{tabularx}{0.80\textwidth}{*{2}{>{\raggedleft \arraybackslash}X}p{3mm}*{5}{r}p{3mm}*{5}{r}}", NL
))
f.writelines((r"\toprule", NL))
f.writelines((
r" & & & \multicolumn{5}{c}{Imbalance Probe} & & \multicolumn{5}{c}{Linear Boundary} \\", NL,
r"$\countn_1$ & $\countn_\profiledim$ & & 5 & 10 & 15 & 20 & 25 & & 5 & 10 & 15 & 20 & 25 \\", NL,
r"\midrule", NL
))
for smallest_size in [0.05, 0.10, 0.15, 0.20, 0.25]:
for largest_size in [0.40, 0.45, 0.50, 0.55, 0.60]:
size_range = (smallest_size, largest_size)
if largest_size == 0.50:
f.write(r"{}\%".format(int(100*smallest_size)))
f.write(" & {}\% & ".format(int(100*largest_size)))
for probe in PROBE_NAMES:
for profile_dim in PROFILE_DIMS:
corrs = numpy.asarray(
[x[1] for x in data_per_profile[(probe, profile_dim, size_range), classifier]]
)
positive = (corrs >= 0.80).sum()
f.write("& {}".format(positive))
if probe == "imb":
f.write("& ")
if largest_size == 0.60 and smallest_size < 0.25:
f.writelines((r" \\[3mm]", NL))
else:
f.writelines((r" \\", NL))
f.writelines((
r"\bottomrule", NL,
r"\end{tabularx}", NL
))
caption = (
r"Number of positive results for each profile configuration. Classifier: {}".format(LEGEND[classifier])
)
f.writelines((r"\caption{%s}" % caption, NL))
f.writelines((r"\label{tab:%s}" % table_name, NL))
f.writelines((r"\end{table}", NL))
def prepare_mean_correlation_table(data_per_profile, classifier):
table_name = "table_mean_correlation_{}".format(classifier)
with open("figures/{}.tex".format(table_name), "w") as f:
f.writelines((r"\begin{table}\centering", NL))
f.writelines((r"\renewcommand{\arraystretch}{1.2}", NL))
f.writelines((r"\renewcommand{\tabcolsep}{4pt}", NL))
f.writelines((r"\small", NL))
f.writelines((
r"\begin{tabularx}{0.80\textwidth}{*{2}{>{\raggedleft \arraybackslash}X}p{3mm}*{5}{r}p{3mm}*{5}{r}}", NL
))
f.writelines((r"\toprule", NL))
f.writelines((
r" & & & \multicolumn{5}{c}{Imbalance Probe} & & \multicolumn{5}{c}{Linear Boundary} \\", NL,
r"$\countn_1$ & $\countn_\profiledim$ & & 5 & 10 & 15 & 20 & 25 & & 5 & 10 & 15 & 20 & 25 \\", NL,
r"\midrule", NL
))
for smallest_size in [0.05, 0.10, 0.15, 0.20, 0.25]:
for largest_size in [0.40, 0.45, 0.50, 0.55, 0.60]:
size_range = (smallest_size, largest_size)
if largest_size == 0.50:
f.write(r"{}\%".format(int(100*smallest_size)))
f.write(" & {}\% & ".format(int(100*largest_size)))
for probe in PROBE_NAMES:
for profile_dim in PROFILE_DIMS:
corrs = numpy.asarray(
[x[1] for x in data_per_profile[(probe, profile_dim, size_range), classifier]]
)
positive = 100 * corrs[corrs >= 0.80].mean()**2
f.write("& {:.1f}".format(positive))
if probe == "imb":
f.write("& ")
if largest_size == 0.60 and smallest_size < 0.25:
f.writelines((r" \\[3mm]", NL))
else:
f.writelines((r" \\", NL))
f.writelines((
r"\bottomrule", NL,
r"\end{tabularx}", NL
))
caption = (
r"Number of positive results for each profile configuration. Classifier: {}".format(LEGEND[classifier])
)
f.writelines((r"\caption{%s}" % caption, NL))
f.writelines((r"\label{tab:%s}" % table_name, NL))
f.writelines((r"\end{table}", NL))
def prepare_best_result_plots(pyplot, raw_data, data_per_cluster, clusterer, classifier):
for dataset, n_clusters in product(DATASET_NAMES, CLUSTER_NUMS):
clustering_conf = (dataset, clusterer, n_clusters)
profile_conf, corr = data_per_cluster[clustering_conf, classifier][0]
data = raw_data[clustering_conf, profile_conf]
draw_plot(pyplot, data, corr, clustering_conf, profile_conf, classifier)
def draw_plot(pyplot, data, corr, clustering_conf, profile_conf, classifier):
dataset, clusterer, n_clusters = clustering_conf
figure_name = "best_plot_{}_{}_{}_{}".format(clusterer, n_clusters, classifier, dataset)
sizes = data["size"]
mris = data["mri"][sizes > 0]
errs = data[classifier][sizes > 0]
pyplot.figure()
pyplot.gcf().set_size_inches(2, 2)
pyplot.plot(mris, errs, "ko-")
x_min, x_max = pyplot.xlim()
y_min, y_max = pyplot.ylim()
pyplot.xticks(numpy.linspace(x_min, x_max, 3))
pyplot.yticks(numpy.linspace(y_min, y_max, 3))
pyplot.axes().set_aspect((x_max - x_min) / (y_max - y_min))
pyplot.grid()
pyplot.title(r"{:.1f}\%{}".format(100*corr*corr, r" $\bullet$" if corr >= 0.8 else ""))
pyplot.savefig("figures/{}.pdf".format(figure_name), bbox_inches="tight")
pyplot.close()
def prepare_best_result_table(data_per_cluster, clusterer, classifier):
table_name = "table_best_results_{}_{}".format(clusterer, classifier)
totals = defaultdict(int)
with open("figures/{}.tex".format(table_name), "w") as f:
f.writelines((r"\begin{table}\centering", NL))
f.writelines((r"\renewcommand{\arraystretch}{1.1}", NL))
f.writelines((r"\renewcommand{\tabcolsep}{3pt}", NL))
f.writelines((r"\small", NL))
f.writelines((r"\begin{tabularx}{0.80\textwidth}{Xr@{.}l@{}lr@{.}l@{}lr@{.}l@{}lr@{.}l@{}lr@{.}l@{}l}", NL))
f.writelines((r"\toprule", NL))
f.writelines((
r" & \multicolumn{15}{c}{Number of clusters} \\", NL,
r"\cmidrule{2-16}", NL,
r"Dataset & \multicolumn{3}{c}{2} & \multicolumn{3}{c}{3} & \multicolumn{3}{c}{4}",
r" & \multicolumn{3}{c}{5} & \multicolumn{3}{c}{6} \\", NL,
r"\midrule", NL,
))
for dataset in DATASET_NAMES:
f.write("{} ".format(dataset))
for n_clusters in CLUSTER_NUMS:
corr = data_per_cluster[(dataset, clusterer, n_clusters), classifier][0][1]
corr_str = "{:.2f}".format(
100*corr**2
).replace(".", "&")
if corr >= 0.80:
f.write(r"& {} & $\bullet$ ".format(corr_str))
totals[n_clusters] += 1
else:
f.write(r"& {} & ".format(corr_str))
f.writelines((r" \\", NL))
f.writelines((
r"\midrule", NL,
r"Positive results & ", r" & & ".join(
("\multicolumn{2}{r}{%d}" % totals[n]) for n in CLUSTER_NUMS
), r" & \\", NL,
r"\bottomrule", NL,
r"\end{tabularx}", NL
))
caption = (
r"Best results for each dataset and number of clusters. Clusterer: {}. Compared classifier: {}.".format(
LEGEND[clusterer], LEGEND[classifier]
)
)
f.writelines((r"\caption{%s}" % caption, NL))
f.writelines((r"\label{tab:%s}" % table_name, NL))
f.writelines((r"\end{table}", NL))
def prepare_profile_table(profile_conf, count, data_per_cluster, clusterer, classifier):
probe, profile_dim, size_range = profile_conf
table_name = "table_profile_{}_{:02d}_{:02d}_{:02d}_{}_{}".format(
probe, profile_dim, int(100*size_range[0]), int(100*size_range[1]), clusterer, classifier
)
totals = defaultdict(int)
with open("figures/{}.tex".format(table_name), "w") as f:
f.writelines((r"\begin{table}\centering", NL))
f.writelines((r"\renewcommand{\arraystretch}{1.1}", NL))
f.writelines((r"\renewcommand{\tabcolsep}{3pt}", NL))
f.writelines((r"\small", NL))
f.writelines((r"\begin{tabularx}{0.80\textwidth}{Xr@{.}l@{}lr@{.}l@{}lr@{.}l@{}lr@{.}l@{}lr@{.}l@{}l}", NL))
f.writelines((r"\toprule", NL))
f.writelines((
r" & \multicolumn{15}{c}{Number of clusters} \\", NL,
r"\cmidrule{2-16}", NL,
r"Dataset & \multicolumn{3}{c}{2} & \multicolumn{3}{c}{3} & \multicolumn{3}{c}{4}",
r" & \multicolumn{3}{c}{5} & \multicolumn{3}{c}{6} \\", NL,
r"\midrule", NL,
))
for dataset in DATASET_NAMES:
f.write("{} ".format(dataset))
for n_clusters in CLUSTER_NUMS:
corr = dict(data_per_cluster[(dataset, clusterer, n_clusters), classifier])[profile_conf]
corr_str = "{:.2f}".format(
100*corr**2
).replace(".", "&")
if corr >= 0.80:
f.write(r"& {} & $\bullet$ ".format(corr_str))
totals[n_clusters] += 1
else:
f.write(r"& {} & ".format(corr_str))
f.writelines((r" \\", NL))
f.writelines((
r"\midrule", NL,
r"Positive results & ", r" & & ".join(
("\multicolumn{2}{r}{%d}" % totals[n]) for n in CLUSTER_NUMS
), r" & \\", NL,
r"\bottomrule", NL,
r"\end{tabularx}", NL
))
caption = (
r"Results for {} Probe, \profiledim = {}, $\countn_1 = {}\%$, $\countn_\profiledim = {}\%$. ".format(
LEGEND[probe], profile_dim, int(100*size_range[0]), int(100*size_range[1]))
)
caption += (
r"Clusterer: {}. Compared classifier: {}.".format(LEGEND[clusterer], LEGEND[classifier])
)
f.writelines((r"\caption{%s}" % caption, NL))
f.writelines((r"\label{tab:%s}" % table_name, NL))
f.writelines((r"\end{table}", NL))
def get_best_profile_confs(data_per_profile, n, probe, clusterer, classifier):
print "Best {} {} profile confs".format(n, probe)
counts = {}
for profile_conf in PROFILE_CONFS:
if profile_conf[0] != probe:
continue
corrs = numpy.asarray([x[1] for x in data_per_profile[profile_conf, classifier] if x[0][1] == clusterer])
counts[profile_conf] = (corrs >= 0.80).sum()
sorted_counts = sorted(counts.items(), key=lambda x: -x[1])
for conf, count in sorted_counts[:n]:
print conf, count
return sorted_counts[:n]
def configure_matplotlib():
matplotlib.use('pgf')
pgf_rc = {
"font.family": "serif", # use serif/main font for text elements
"text.usetex": True, # use inline math for ticks
"pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.texsystem": "pdflatex",
"pgf.preamble": [
r"\usepackage[utf8]{inputenc}",
r"\usepackage{microtype}",
r"\usepackage{amsfonts}",
r"\usepackage{amsmath}",
r"\usepackage{amssymb}",
r"\usepackage{booktabs}",
r"\usepackage{fancyhdr}",
r"\usepackage{graphicx}",
r"\usepackage{nicefrac}",
r"\usepackage{xspace}"
]
}
matplotlib.rcParams.update(pgf_rc)
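# Illustrative note (added): matplotlib.use('pgf') only takes effect when it is
# called before matplotlib.pyplot is imported, which is why main() above calls
# configure_matplotlib() first and imports pyplot afterwards.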
if __name__ == '__main__':
main()
| gpl-2.0 |
kaichogami/scikit-learn | examples/mixture/plot_gmm_sin.py | 36 | 2804 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
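# Illustrative note (added): the loop above traces y = 3*sin(x) for x running
# from -6 up to just under 4*pi - 6, jittering x by N(0, 0.1) and the sine term
# by N(0, 0.2) before scaling by 3.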
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], color=color, s=4)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
pastephens/pysal | pysal/contrib/pdio/dbf.py | 7 | 6661 | """miscellaneous file manipulation utilities
"""
import numpy as np
import pysal as ps
import pandas as pd
def check_dups(li):
"""checks duplicates in list of ID values
ID values must be read in as a list
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
li : list of ID values
Returns
-------
a list with the duplicate IDs
"""
return list(set([x for x in li if li.count(x) > 1]))
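# Illustrative usage (added, not in the original module); the ID list below is
# invented for demonstration only:
#   check_dups([10, 20, 20, 30, 30, 30]) -> [20, 30]
# (order of the returned duplicates is not guaranteed, since the
# implementation passes through a set)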
def dbfdups(dbfpath,idvar):
"""checks duplicates in a dBase file
ID variable must be specified correctly
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
dbfpath : file path to dBase file
idvar : ID variable in dBase file
Returns
-------
a list with the duplicate IDs
"""
db = ps.open(dbfpath,'r')
li = db.by_col(idvar)
return list(set([x for x in li if li.count(x) > 1]))
def df2dbf(df, dbf_path, my_specs=None):
'''
Convert a pandas.DataFrame into a dbf.
__author__ = "Dani Arribas-Bel <[email protected]>, Luc Anselin <[email protected]>"
...
Arguments
---------
df : DataFrame
Pandas dataframe object to be entirely written out to a dbf
dbf_path : str
Path to the output dbf. It is also returned by the function
my_specs : list
List with the field_specs to use for each column.
Defaults to None and applies the following scheme:
* int: ('N', 14, 0) - for all ints
* float: ('N', 14, 14) - for all floats
* str: ('C', 14, 0) - for string, object and category
with all variants for different type sizes
    Note: using dtypes.name may not be fully robust, but the preferred approach of
    using isinstance seems too clumsy.
'''
if my_specs:
specs = my_specs
else:
"""
type2spec = {int: ('N', 20, 0),
np.int64: ('N', 20, 0),
np.int32: ('N', 20, 0),
np.int16: ('N', 20, 0),
np.int8: ('N', 20, 0),
float: ('N', 36, 15),
np.float64: ('N', 36, 15),
np.float32: ('N', 36, 15),
str: ('C', 14, 0)
}
types = [type(df[i].iloc[0]) for i in df.columns]
"""
# new approach using dtypes.name to avoid numpy name issue in type
type2spec = {'int': ('N', 20, 0),
'int8': ('N', 20, 0),
'int16': ('N', 20, 0),
'int32': ('N', 20, 0),
'int64': ('N', 20, 0),
'float': ('N', 36, 15),
'float32': ('N', 36, 15),
'float64': ('N', 36, 15),
'str': ('C', 14, 0),
'object': ('C', 14, 0),
'category': ('C', 14, 0)
}
types = [df[i].dtypes.name for i in df.columns]
specs = [type2spec[t] for t in types]
db = ps.open(dbf_path, 'w')
db.header = list(df.columns)
db.field_spec = specs
for i, row in df.T.iteritems():
db.write(row)
db.close()
return dbf_path
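# Illustrative usage sketch (added, not part of the original module); the
# column names and output path are hypothetical.
#   import pandas as pd
#   df = pd.DataFrame({"ID": [1, 2, 3], "VALUE": [0.5, 1.5, 2.5]})
#   df2dbf(df, "example_out.dbf")
# With the default scheme, the int column gets spec ('N', 20, 0) and the float
# column gets ('N', 36, 15).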
def dbf2df(dbf_path, index=None, cols=False, incl_index=False):
'''
Read a dbf file as a pandas.DataFrame, optionally selecting the index
variable and which columns are to be loaded.
__author__ = "Dani Arribas-Bel <[email protected]> "
...
Arguments
---------
dbf_path : str
Path to the DBF file to be read
index : str
Name of the column to be used as the index of the DataFrame
cols : list
List with the names of the columns to be read into the
DataFrame. Defaults to False, which reads the whole dbf
incl_index : Boolean
If True index is included in the DataFrame as a
column too. Defaults to False
Returns
-------
df : DataFrame
pandas.DataFrame object created
'''
db = ps.open(dbf_path)
if cols:
if incl_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = db.header
data = dict([(var, db.by_col(var)) for var in vars_to_read])
if index:
index = db.by_col(index)
db.close()
return pd.DataFrame(data, index=index, columns=vars_to_read)
else:
db.close()
return pd.DataFrame(data,columns=vars_to_read)
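# Illustrative usage sketch (added): reading selected columns from a dbf and
# indexing by an ID field; the file and field names are hypothetical.
#   df = dbf2df("tracts.dbf", index="GEOID", cols=["POP", "AREA"])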
def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2):
'''
Wrapper function to merge two dbf files into a new dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses dbf2df and df2dbf to read and write the dbf files into a pandas
DataFrame. Uses all default settings for dbf2df and df2dbf (see docs
for specifics).
...
Arguments
---------
dbf1_path : str
Path to the first (left) dbf file
dbf2_path : str
Path to the second (right) dbf file
out_path : str
Path to the output dbf file (returned by the function)
joinkey1 : str
Variable name for the key in the first dbf. Must be specified.
Key must take unique values.
joinkey2 : str
Variable name for the key in the second dbf. Must be specified.
Key must take unique values.
Returns
-------
dbfpath : path to output file
'''
df1 = dbf2df(dbf1_path,index=joinkey1)
df2 = dbf2df(dbf2_path,index=joinkey2)
dfbig = pd.merge(df1,df2,left_on=joinkey1,right_on=joinkey2,sort=False)
dp = df2dbf(dfbig,out_path)
return dp
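# Illustrative usage sketch (added): merging two hypothetical dbf files on a
# shared key and writing the result to a new dbf.
#   dbfjoin("blocks.dbf", "census.dbf", "merged.dbf", "BLOCKID", "BLOCKID")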
def dta2dbf(dta_path,dbf_path):
"""
Wrapper function to convert a stata dta file into a dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses df2dbf to write the dbf files from a pandas
DataFrame. Uses all default settings for df2dbf (see docs
for specifics).
...
Arguments
---------
dta_path : str
Path to the Stata dta file
dbf_path : str
Path to the output dbf file
Returns
-------
dbf_path : path to output file
"""
db = pd.read_stata(dta_path)
dp = df2dbf(db,dbf_path)
return dp
| bsd-3-clause |
oxtopus/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'qt' in sys.modules and not backend == 'QtAgg':
import qt
if not qt.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qtAgg'
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
#import Tkinter
pass #what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
if o is None:
o = gcf()
return o.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
"""
Switch the default backend to newbackend. This feature is
**experimental**, and is only expected to work switching to an
image backend. Eg, if you have a bunch of PostScript scripts that
you want to run from an interactive ipython session, you may want
to switch to the PS backend before running them to avoid having a
bunch of GUI windows popup. If you try to interactively switch
from one GUI backend to another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global new_figure_manager, draw_if_interactive, show
matplotlib.use(newbackend, warn=False)
reload(matplotlib.backends)
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def isinteractive():
"""
Return the interactive status
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
if matplotlib.rc.__doc__ is not None:
rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
if matplotlib.rcdefaults.__doc__ is not None:
rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
"""
Get the current :class:`~matplotlib.cm.ScalarMappable` instance
(image or patch collection), or *None* if no images or patch
collections have been defined. The commands
:func:`~matplotlib.pyplot.imshow` and
:func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances.
"""
return gci._current
gci._current = None
def sci(im):
"""
Set the current image (target of colormap commands like
:func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`).
"""
gci._current = im
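# Illustrative note (added, not part of the original module): image-creating
# commands such as imshow() set gci._current themselves, and functions like
# colorbar() fall back to gci() when no mappable is passed, so a typical
# session is simply imshow(data) followed by colorbar().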
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
if _setp.__doc__ is not None:
setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
frameon = True,
FigureClass = Figure,
**kwargs
):
"""
call signature::
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Create a new figure and return a :class:`matplotlib.figure.Figure`
instance. If *num* = *None*, the figure number will be incremented and
a new figure will be created. The returned figure objects have a
*number* attribute holding this number.
If *num* is an integer, and ``figure(num)`` already exists, make it
active and return the handle to it. If ``figure(num)`` does not exist
it will be created. Numbering starts at 1, matlab style::
figure(1)
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
figsize width x height in inches; defaults to rc figure.figsize
dpi resolution; defaults to rc figure.dpi
facecolor the background color; defaults to rc figure.facecolor
edgecolor the border color; defaults to rc figure.edgecolor
========= =======================================================
rcParams defines the default values, which can be modified in the
matplotlibrc file
*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
class that will be passed on to :meth:`new_figure_manager` in the
backends which allows you to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed on to your
figure init function.
"""
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None : facecolor = rcParams['figure.facecolor']
if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
if num is None:
allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
if allnums:
num = max(allnums) + 1
else:
num = 1
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
if get_backend().lower() == 'ps': dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
def gcf():
"Return a handle to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
"""
Close a figure window
``close()`` by itself closes the current figure
``close(num)`` closes figure number *num*
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close('all')`` closes all the figure windows
"""
if len(args)==0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None: return
else:
figManager.canvas.mpl_disconnect(figManager._cidgcf)
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args)==1:
arg = args[0]
if arg=='all':
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif isinstance(arg, Figure):
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
if manager.canvas.figure==arg:
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
else:
raise TypeError('Unrecognized argument type %s to close'%type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure
"""
gcf().clf()
draw_if_interactive()
def draw():
'redraw the current figure'
get_current_fig_manager().canvas.draw()
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.text.__doc__ is not None:
figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.suptitle.__doc__ is not None:
suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
gci._current = ret
return ret
if Figure.figimage.__doc__ is not None:
figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Addition kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`:
For information about the location codes
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
# b=None toggles the hold state, so let's get get the current hold
# state; but should pyplot hold toggle the rc setting - me thinks
# not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
over calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes at position rect specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============ ================================================
    kwarg   Accepts      Description
======= ============ ================================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute with otherax
sharey otherax current axes shares yaxis attribute with otherax
polar [True|False] use a polar axes?
======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args)==0: return subplot(111, **kwargs)
if nargs>1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
def delaxes(*args):
"""
``delaxes(ax)``: remove *ax* from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Create a subplot command, creating axes with::
subplot(numRows, numCols, plotNum)
where *plotNum* = 1 is the first plot number and increasing *plotNums*
fill rows first. max(*plotNum*) == *numRows* * *numCols*
You can leave out the commas if *numRows* <= *numCols* <=
*plotNum* < 10, as in::
subplot(211) # 2 rows, 1 column, first (upper) plot
``subplot(111)`` is the default axis.
New subplots that overlap old will delete the old axes. If you do
not want this behavior, use
:meth:`matplotlib.figure.Figure.add_subplot` or the
:func:`~matplotlib.pyplot.axes` command. Eg.::
from pylab import *
plot([1,2,3]) # implicitly creates subplot(111)
subplot(211) # overlaps, subplot(111) is killed
plot(rand(12), rand(12))
subplot(212, axisbg='y') # creates 2nd subplot with yellow background
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to False.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :func:`matplotlib.projections.register_projection`
.. seealso::
:func:`~matplotlib.pyplot.axes`:
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pylab_examples/polar_scatter.py`
**Example:**
.. plot:: mpl_examples/pylab_examples/subplot_demo.py
"""
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
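# Sketch of the subplot numbering described above, using the shorthand form
# without commas.  Illustrative only; the _demo_ name is not part of the
# pyplot API and an interactive backend is assumed when it is called.
def _demo_subplot_grid():
    import numpy as np
    subplot(211)                    # 2 rows, 1 column, upper axes
    plot(np.arange(12))
    subplot(212, axisbg='y')        # lower axes with a yellow background
    plot(np.random.rand(12))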
def twinx(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the xaxis. The ticks for *ax2* will be placed on
the right, and the *ax2* instance is returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the yaxis. The ticks for *ax2* will be placed on
the top, and the *ax2* instance is returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
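# Sketch of plotting two quantities against a shared x axis with twinx().
# Illustrative only; assumes an interactive backend when called.
def _demo_twinx():
    import numpy as np
    t = np.linspace(0.0, 1.0, 100)
    ax_left = gca()
    ax_left.plot(t, np.exp(t), 'b-')
    ax_right = twinx()              # second y axis, ticks on the right
    ax_right.plot(t, np.sin(2 * np.pi * t), 'r-')
    return ax_left, ax_right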
def subplots_adjust(*args, **kwargs):
"""
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Tune the subplot layout via the
:class:`matplotlib.figure.SubplotParams` mechanism. The parameter
meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for *targetfig* (default gcf).
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def box(on=None):
"""
Turn the axes box on or off according to *on*.
If *on* is *None*, toggle state.
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set the title of the current axis to *s*.
Default font override is::
override = {'fontsize': 'medium',
'verticalalignment': 'bottom',
'horizontalalignment': 'center'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
for information on how override and the optional args work.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Set/Get the axis properties:
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.
>>> axis('off')
turns off the axis lines and labels.
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different from MATLAB.
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.
>>> axis('auto')
and
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
If ``len(v) == 0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
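# Sketch of the main axis() call forms listed in the docstring above.
# Illustrative only; the _demo_ name is not part of the pyplot API.
def _demo_axis_forms():
    plot([1, 2, 3], [1, 4, 9])
    limits = axis()                 # get [xmin, xmax, ymin, ymax]
    axis([0, 4, 0, 10])             # set all four limits at once
    axis(xmin=0.5)                  # adjust a single limit via kwargs
    axis('equal')                   # equal increments in x and y
    return limits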
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis to *s*
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis to *s*.
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
'rotation' : 'vertical'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Set/Get the xlimits of the current axes::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, eg.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Set/Get the ylimits of the current axes::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, eg.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
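# Sketch of the symmetric xlim()/ylim() getter/setter behaviour documented
# above.  Illustrative only.
def _demo_limits():
    plot([1, 2, 3])
    xmin, xmax = xlim()             # query the current x limits
    xlim(xmax=xmax * 2)             # stretch the right edge, keep the left
    ylim(-1, 4)                     # set both y limits positionally
    return xmin, xmax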
def xscale(*args, **kwargs):
"""
call signature::
xscale(scale, **kwargs)
Set the scaling for the x-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_xscale(*args, **kwargs)
draw_if_interactive()
return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
"""
call signature::
yscale(scale, **kwargs)
Set the scaling for the y-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_yscale(*args, **kwargs)
draw_if_interactive()
return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def xticks(*args, **kwargs):
"""
Set/Get the locations and labels of the x-axis ticks::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Set/Get the locations and labels of the y-axis ticks::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
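# Sketch of setting tick locations and labels together, as documented above.
# Illustrative only; extra keyword arguments become Text properties.
def _demo_ticks():
    import numpy as np
    plot(np.arange(5), np.arange(5) ** 2)
    xticks(np.arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue'),
           rotation=30)             # rotate the x tick labels
    locs, labels = yticks()         # query current y tick locations/labels
    return locs, labels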
def rgrids(*args, **kwargs):
"""
Set/Get the radial locations of the gridlines and ticklabels on a
polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each radial distance.
If *labels* is None, the rformatter will be used.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_ticklines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Set/Get the theta locations of the gridlines and ticklabels.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of theta gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (each of these
gridlines is a line of constant theta).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the theta gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the theta gridlines
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
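# Sketch of customizing grids on a polar plot with rgrids()/thetagrids(),
# using the example values from the docstrings above.  Illustrative only;
# requires the current axes to be polar.
def _demo_polar_grids():
    import numpy as np
    theta = np.linspace(0.0, 2 * np.pi, 100)
    polar(theta, np.abs(np.sin(3 * theta)))
    rgrids((0.25, 0.5, 1.0))                        # radial grid locations
    thetagrids(range(45, 360, 90), ('NE', 'NW', 'SW', 'SE'))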
## Plotting Info ##
def plotting():
"""
Plotting commands
=============== =========================================================
Command Description
=============== =========================================================
axes Create a new axes
axis Set or return the current axis limits
bar make a bar chart
boxplot make a box and whiskers chart
cla clear current axes
clabel label a contour plot
clf clear a figure window
close close a figure window
colorbar add a colorbar to the current figure
cohere make a plot of coherence
contour make a contour plot
contourf make a filled contour plot
csd make a plot of cross spectral density
draw force a redraw of the current figure
errorbar make an errorbar graph
figlegend add a legend to the figure
figimage add an image to the figure, w/o resampling
figtext add text in figure coords
figure create or change active figure
fill make filled polygons
fill_between make filled polygons
gca return the current axes
gcf return the current figure
gci get the current image, or None
getp get a handle graphics property
hist make a histogram
hold set the hold state on current axes
legend add a legend to the axes
loglog a log log plot
imread load image file into array
imshow plot image data
matshow display a matrix in a new figure preserving aspect
pcolor make a pseudocolor plot
plot make a line plot
plotfile plot data from a flat file
psd make a plot of power spectral density
quiver make a direction field (arrows) plot
rc control the default params
savefig save the current figure
scatter make a scatter plot
setp set a handle graphics property
semilogx log x axis
semilogy log y axis
show show the figures
specgram a spectrogram plot
stem make a stem plot
subplot make a subplot (numrows, numcols, axesnum)
table add a table to the axes
text add some text at location x,y to the current axes
title add a title to the current axes
xlabel add an xlabel to the current axes
ylabel add a ylabel to the current axes
=============== =========================================================
The following commands will set the default colormap accordingly:
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
"""
pass
def get_plot_commands(): return ( 'axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold', 'imread',
'imshow', 'legend', 'loglog', 'quiver', 'rc', 'pcolor', 'pcolormesh', 'plot', 'psd',
'savefig', 'scatter', 'set', 'semilogx', 'semilogy', 'show',
'specgram', 'stem', 'subplot', 'table', 'text', 'title', 'xlabel',
'ylabel', 'pie', 'polar')
def colors():
"""
This is a do nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic builtin colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red'
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background::
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
matplotlib provides the following colormaps.
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
You can set the colormap for an image, pcolor, scatter, etc,
either as a keyword argument::
imshow(X, cmap=cm.hot)
or post-hoc using the corresponding pylab interface function::
imshow(X)
hot()
jet()
In interactive mode, this will update the colormap allowing you to
see which one works best for your data.
"""
pass
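# Sketch of the two ways of choosing a colormap described above: as a keyword
# argument at call time, or post-hoc via the colormap convenience functions
# defined later in this module.  Illustrative only; X is assumed to be an
# image-like 2-D array and cm is the matplotlib.cm module used below.
def _demo_colormap_switch(X):
    imshow(X, cmap=cm.hot)          # explicit colormap at call time
    imshow(X)
    jet()                           # switch the current image to jet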
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = colorbar_doc
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
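# Sketch of rescaling color limits with clim(), per the docstring above.
# Illustrative only; X is assumed to be an image-like 2-D array.
def _demo_clim(X):
    imshow(X)
    clim(0, 0.5)                    # rescale the current image only
    for im in gca().get_images():   # or adjust every image on the axes
        im.set_clim(0, 0.05)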
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of fignum, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
if fignum is False or fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
gci._current = im
draw_if_interactive()
return im
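# Sketch of matshow() on a small random matrix; the aspect ratio of the new
# figure follows the array shape.  Illustrative only.
def _demo_matshow():
    import numpy as np
    A = np.random.rand(8, 20)
    im = matshow(A)                 # new figure, origin at the upper left
    colorbar(im)
    return im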
def polar(*args, **kwargs):
"""
call signature::
polar(theta, r, **kwargs)
Make a polar plot. Multiple *theta*, *r* arguments are supported,
with format strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
**kwargs):
"""
Plot the data in *fname*
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, eg., integer
column numbers in both or column names in both.
*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
:func:`matplotlib.pylab.csv2rec` to load the data into a record array.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
"""
fig = figure()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments,
skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_xlabel(xname)
else:
N = len(cols)
for i in range(1,N):
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
ax.grid(True)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
ax.grid(True)
yname, y = getname_val(cols[i])
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
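# Sketch of the two plotfile() call styles from the docstring above.  The
# file name 'data.csv' and its column names are hypothetical, chosen only to
# illustrate the call forms.
def _demo_plotfile():
    plotfile('data.csv', (0, 1, 3))                     # columns by index
    plotfile('data.csv', ('date', 'volume', 'adj_close'),
             plotfuncs={'volume': 'semilogy'})          # columns by name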
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().acorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.acorr.__doc__ is not None:
acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().arrow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.arrow.__doc__ is not None:
arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhline.__doc__ is not None:
axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
| gpl-3.0 |
rhuelga/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/LPC.py | 2 | 1193 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
lpc = ess.LPC(order=14)
N = 512
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
first = 20000
last = first+N
x1 = x[first:last]
X = fft(hamming(N)*x1)
mX = 20 * np.log10(abs(X[:N//2]))
coeff = lpc(x1)
Y = fft(coeff[0], N)
mY = 20 * np.log10(abs(Y[:N//2]))
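# coeff[0] holds the coefficients of the LPC inverse (whitening) filter A(z);
# its negated magnitude response, -mY, approximates the all-pole spectral
# envelope of the frame, which is compared against the DFT magnitude mX below.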
plt.figure(1, figsize=(9, 5))
plt.subplot(2,1,1)
plt.plot(np.arange(first, last)/float(fs), x[first:last], 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(x[first:last]), max(x[first:last])])
plt.title('x (soprano-E4.wav)')
plt.subplot(2,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mX-max(mX), 'r', lw=1.5, label="mX")
plt.plot(np.arange(0, fs/2.0, fs/float(N)), -mY-max(-mY)-3, 'k', lw=1.5, label="mY")
plt.legend()
plt.axis([0, fs/2, -60, 3])
plt.title('mX + mY (LPC approximation)')
plt.tight_layout()
plt.savefig('LPC.png')
plt.show()
| agpl-3.0 |
kou/arrow | python/pyarrow/tests/test_extension_type.py | 3 | 20909 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle
import weakref
import numpy as np
import pyarrow as pa
import pytest
class IntegerType(pa.PyExtensionType):
def __init__(self):
pa.PyExtensionType.__init__(self, pa.int64())
def __reduce__(self):
return IntegerType, ()
class UuidType(pa.PyExtensionType):
def __init__(self):
pa.PyExtensionType.__init__(self, pa.binary(16))
def __reduce__(self):
return UuidType, ()
class ParamExtType(pa.PyExtensionType):
def __init__(self, width):
self._width = width
pa.PyExtensionType.__init__(self, pa.binary(width))
@property
def width(self):
return self._width
def __reduce__(self):
return ParamExtType, (self.width,)
class MyStructType(pa.PyExtensionType):
storage_type = pa.struct([('left', pa.int64()),
('right', pa.int64())])
def __init__(self):
pa.PyExtensionType.__init__(self, self.storage_type)
def __reduce__(self):
return MyStructType, ()
class MyListType(pa.PyExtensionType):
def __init__(self, storage_type):
pa.PyExtensionType.__init__(self, storage_type)
def __reduce__(self):
return MyListType, (self.storage_type,)
def ipc_write_batch(batch):
stream = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
writer.close()
return stream.getvalue()
def ipc_read_batch(buf):
reader = pa.RecordBatchStreamReader(buf)
return reader.read_next_batch()
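# A minimal sketch of how the two IPC helpers above are meant to be combined:
# serialize a record batch containing an extension array and read it back.
# Purely illustrative; it mirrors what test_ipc() below exercises.
def _example_ipc_roundtrip():
    ty = ParamExtType(3)
    storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
    arr = pa.ExtensionArray.from_storage(ty, storage)
    batch = pa.RecordBatch.from_arrays([arr], ["exts"])
    buf = ipc_write_batch(batch)          # write to an in-memory buffer
    return ipc_read_batch(buf)            # read the batch back out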
def test_ext_type_basics():
ty = UuidType()
assert ty.extension_name == "arrow.py_extension_type"
def test_ext_type_str():
ty = IntegerType()
expected = "extension<arrow.py_extension_type<IntegerType>>"
assert str(ty) == expected
assert pa.DataType.__str__(ty) == expected
def test_ext_type_repr():
ty = IntegerType()
assert repr(ty) == "IntegerType(DataType(int64))"
def test_ext_type__lifetime():
ty = UuidType()
wr = weakref.ref(ty)
del ty
assert wr() is None
def test_ext_type__storage_type():
ty = UuidType()
assert ty.storage_type == pa.binary(16)
assert ty.__class__ is UuidType
ty = ParamExtType(5)
assert ty.storage_type == pa.binary(5)
assert ty.__class__ is ParamExtType
def test_uuid_type_pickle():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
ty = UuidType()
ser = pickle.dumps(ty, protocol=proto)
del ty
ty = pickle.loads(ser)
wr = weakref.ref(ty)
assert ty.extension_name == "arrow.py_extension_type"
del ty
assert wr() is None
def test_ext_type_equality():
a = ParamExtType(5)
b = ParamExtType(6)
c = ParamExtType(6)
assert a != b
assert b == c
d = UuidType()
e = UuidType()
assert a != d
assert d == e
def test_ext_array_basics():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
arr.validate()
assert arr.type is ty
assert arr.storage.equals(storage)
def test_ext_array_lifetime():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
refs = [weakref.ref(ty), weakref.ref(arr), weakref.ref(storage)]
del ty, storage, arr
for ref in refs:
assert ref() is None
def test_ext_array_errors():
ty = ParamExtType(4)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
with pytest.raises(TypeError, match="Incompatible storage type"):
pa.ExtensionArray.from_storage(ty, storage)
def test_ext_array_equality():
storage1 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
storage2 = pa.array([b"0123456789abcdef"], type=pa.binary(16))
storage3 = pa.array([], type=pa.binary(16))
ty1 = UuidType()
ty2 = ParamExtType(16)
a = pa.ExtensionArray.from_storage(ty1, storage1)
b = pa.ExtensionArray.from_storage(ty1, storage2)
assert a.equals(b)
c = pa.ExtensionArray.from_storage(ty1, storage3)
assert not a.equals(c)
d = pa.ExtensionArray.from_storage(ty2, storage1)
assert not a.equals(d)
e = pa.ExtensionArray.from_storage(ty2, storage2)
assert d.equals(e)
f = pa.ExtensionArray.from_storage(ty2, storage3)
assert not d.equals(f)
def test_ext_array_pickling():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
ser = pickle.dumps(arr, protocol=proto)
del ty, storage, arr
arr = pickle.loads(ser)
arr.validate()
assert isinstance(arr, pa.ExtensionArray)
assert arr.type == ParamExtType(3)
assert arr.type.storage_type == pa.binary(3)
assert arr.storage.type == pa.binary(3)
assert arr.storage.to_pylist() == [b"foo", b"bar"]
def test_ext_array_conversion_to_numpy():
storage1 = pa.array([1, 2, 3], type=pa.int64())
storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
ty1 = IntegerType()
ty2 = ParamExtType(3)
arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
arr2 = pa.ExtensionArray.from_storage(ty2, storage2)
result = arr1.to_numpy()
expected = np.array([1, 2, 3], dtype="int64")
np.testing.assert_array_equal(result, expected)
with pytest.raises(ValueError, match="zero_copy_only was True"):
arr2.to_numpy()
result = arr2.to_numpy(zero_copy_only=False)
expected = np.array([b"123", b"456", b"789"])
np.testing.assert_array_equal(result, expected)
@pytest.mark.pandas
def test_ext_array_conversion_to_pandas():
import pandas as pd
storage1 = pa.array([1, 2, 3], type=pa.int64())
storage2 = pa.array([b"123", b"456", b"789"], type=pa.binary(3))
ty1 = IntegerType()
ty2 = ParamExtType(3)
arr1 = pa.ExtensionArray.from_storage(ty1, storage1)
arr2 = pa.ExtensionArray.from_storage(ty2, storage2)
result = arr1.to_pandas()
expected = pd.Series([1, 2, 3], dtype="int64")
pd.testing.assert_series_equal(result, expected)
result = arr2.to_pandas()
expected = pd.Series([b"123", b"456", b"789"], dtype=object)
pd.testing.assert_series_equal(result, expected)
def test_cast_kernel_on_extension_arrays():
# test array casting
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(IntegerType(), storage)
# test that no allocation happens during identity cast
allocated_before_cast = pa.total_allocated_bytes()
casted = arr.cast(pa.int64())
assert pa.total_allocated_bytes() == allocated_before_cast
cases = [
(pa.int64(), pa.Int64Array),
(pa.int32(), pa.Int32Array),
(pa.int16(), pa.Int16Array),
(pa.uint64(), pa.UInt64Array),
(pa.uint32(), pa.UInt32Array),
(pa.uint16(), pa.UInt16Array)
]
for typ, klass in cases:
casted = arr.cast(typ)
assert casted.type == typ
assert isinstance(casted, klass)
# test chunked array casting
arr = pa.chunked_array([arr, arr])
casted = arr.cast(pa.int16())
assert casted.type == pa.int16()
assert isinstance(casted, pa.ChunkedArray)
def test_casting_to_extension_type_raises():
arr = pa.array([1, 2, 3, 4], pa.int64())
with pytest.raises(pa.ArrowNotImplementedError):
arr.cast(IntegerType())
def example_batch():
ty = ParamExtType(3)
storage = pa.array([b"foo", b"bar"], type=pa.binary(3))
arr = pa.ExtensionArray.from_storage(ty, storage)
return pa.RecordBatch.from_arrays([arr], ["exts"])
def check_example_batch(batch):
arr = batch.column(0)
assert isinstance(arr, pa.ExtensionArray)
assert arr.type.storage_type == pa.binary(3)
assert arr.storage.to_pylist() == [b"foo", b"bar"]
return arr
def test_ipc():
batch = example_batch()
buf = ipc_write_batch(batch)
del batch
batch = ipc_read_batch(buf)
arr = check_example_batch(batch)
assert arr.type == ParamExtType(3)
def test_ipc_unknown_type():
batch = example_batch()
buf = ipc_write_batch(batch)
del batch
orig_type = ParamExtType
try:
# Simulate the original Python type being unavailable.
# Deserialization should not fail but return a placeholder type.
del globals()['ParamExtType']
batch = ipc_read_batch(buf)
arr = check_example_batch(batch)
assert isinstance(arr.type, pa.UnknownExtensionType)
# Can be serialized again
buf2 = ipc_write_batch(batch)
del batch, arr
batch = ipc_read_batch(buf2)
arr = check_example_batch(batch)
assert isinstance(arr.type, pa.UnknownExtensionType)
finally:
globals()['ParamExtType'] = orig_type
# Deserialize again with the type restored
batch = ipc_read_batch(buf2)
arr = check_example_batch(batch)
assert arr.type == ParamExtType(3)
class PeriodArray(pa.ExtensionArray):
pass
class PeriodType(pa.ExtensionType):
def __init__(self, freq):
# attributes need to be set first before calling
# super init (as that calls serialize)
self._freq = freq
pa.ExtensionType.__init__(self, pa.int64(), 'test.period')
@property
def freq(self):
return self._freq
def __arrow_ext_serialize__(self):
return "freq={}".format(self.freq).encode()
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
serialized = serialized.decode()
assert serialized.startswith("freq=")
freq = serialized.split('=')[1]
return PeriodType(freq)
def __eq__(self, other):
if isinstance(other, pa.BaseExtensionType):
return (type(self) == type(other) and
self.freq == other.freq)
else:
return NotImplemented
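# Minimal sketch (added for illustration, assuming the PeriodType defined
# above and the module-level `pa` import): the serialize/deserialize hooks
# round-trip the `freq` attribute through a plain byte string, which is what
# the IPC tests below exercise.
def _example_period_roundtrip():
    ty = PeriodType('D')
    payload = ty.__arrow_ext_serialize__()  # b'freq=D'
    restored = PeriodType.__arrow_ext_deserialize__(pa.int64(), payload)
    assert restored.freq == 'D'
    return restored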
class PeriodTypeWithClass(PeriodType):
def __init__(self, freq):
PeriodType.__init__(self, freq)
def __arrow_ext_class__(self):
return PeriodArray
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
freq = PeriodType.__arrow_ext_deserialize__(
storage_type, serialized).freq
return PeriodTypeWithClass(freq)
@pytest.fixture(params=[PeriodType('D'), PeriodTypeWithClass('D')])
def registered_period_type(request):
# setup
period_type = request.param
period_class = period_type.__arrow_ext_class__()
pa.register_extension_type(period_type)
yield period_type, period_class
# teardown
try:
pa.unregister_extension_type('test.period')
except KeyError:
pass
def test_generic_ext_type():
period_type = PeriodType('D')
assert period_type.extension_name == "test.period"
assert period_type.storage_type == pa.int64()
# default ext_class expected.
assert period_type.__arrow_ext_class__() == pa.ExtensionArray
def test_generic_ext_type_ipc(registered_period_type):
period_type, period_class = registered_period_type
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
batch = pa.RecordBatch.from_arrays([arr], ["ext"])
    # check the built array has exactly the expected class
assert type(arr) == period_class
buf = ipc_write_batch(batch)
del batch
batch = ipc_read_batch(buf)
result = batch.column(0)
# check the deserialized array class is the expected one
assert type(result) == period_class
assert result.type.extension_name == "test.period"
assert arr.storage.to_pylist() == [1, 2, 3, 4]
# we get back an actual PeriodType
assert isinstance(result.type, PeriodType)
assert result.type.freq == 'D'
assert result.type == period_type
    # using a different parametrization than the one it was registered with
period_type_H = period_type.__class__('H')
assert period_type_H.extension_name == "test.period"
assert period_type_H.freq == 'H'
arr = pa.ExtensionArray.from_storage(period_type_H, storage)
batch = pa.RecordBatch.from_arrays([arr], ["ext"])
buf = ipc_write_batch(batch)
del batch
batch = ipc_read_batch(buf)
result = batch.column(0)
assert isinstance(result.type, PeriodType)
assert result.type.freq == 'H'
assert type(result) == period_class
def test_generic_ext_type_ipc_unknown(registered_period_type):
period_type, _ = registered_period_type
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
batch = pa.RecordBatch.from_arrays([arr], ["ext"])
buf = ipc_write_batch(batch)
del batch
# unregister type before loading again => reading unknown extension type
# as plain array (but metadata in schema's field are preserved)
pa.unregister_extension_type('test.period')
batch = ipc_read_batch(buf)
result = batch.column(0)
assert isinstance(result, pa.Int64Array)
ext_field = batch.schema.field('ext')
assert ext_field.metadata == {
b'ARROW:extension:metadata': b'freq=D',
b'ARROW:extension:name': b'test.period'
}
def test_generic_ext_type_equality():
period_type = PeriodType('D')
assert period_type.extension_name == "test.period"
period_type2 = PeriodType('D')
period_type3 = PeriodType('H')
assert period_type == period_type2
assert not period_type == period_type3
def test_generic_ext_type_register(registered_period_type):
# test that trying to register other type does not segfault
with pytest.raises(TypeError):
pa.register_extension_type(pa.string())
# register second time raises KeyError
period_type = PeriodType('D')
with pytest.raises(KeyError):
pa.register_extension_type(period_type)
@pytest.mark.parquet
def test_parquet_period(tmpdir, registered_period_type):
# Parquet support for primitive extension types
period_type, period_class = registered_period_type
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
table = pa.table([arr], names=["ext"])
import pyarrow.parquet as pq
filename = tmpdir / 'period_extension_type.parquet'
pq.write_table(table, filename)
# Stored in parquet as storage type but with extension metadata saved
# in the serialized arrow schema
meta = pq.read_metadata(filename)
assert meta.schema.column(0).physical_type == "INT64"
assert b"ARROW:schema" in meta.metadata
import base64
decoded_schema = base64.b64decode(meta.metadata[b"ARROW:schema"])
schema = pa.ipc.read_schema(pa.BufferReader(decoded_schema))
# Since the type could be reconstructed, the extension type metadata is
# absent.
assert schema.field("ext").metadata == {}
# When reading in, properly create extension type if it is registered
result = pq.read_table(filename)
assert result.schema.field("ext").type == period_type
assert result.schema.field("ext").metadata == {}
# Get the exact array class defined by the registered type.
result_array = result.column("ext").chunk(0)
assert type(result_array) is period_class
# When the type is not registered, read in as storage type
pa.unregister_extension_type(period_type.extension_name)
result = pq.read_table(filename)
assert result.schema.field("ext").type == pa.int64()
# The extension metadata is present for roundtripping.
assert result.schema.field("ext").metadata == {
b'ARROW:extension:metadata': b'freq=D',
b'ARROW:extension:name': b'test.period'
}
@pytest.mark.parquet
def test_parquet_extension_with_nested_storage(tmpdir):
# Parquet support for extension types with nested storage type
import pyarrow.parquet as pq
struct_array = pa.StructArray.from_arrays(
[pa.array([0, 1], type="int64"), pa.array([4, 5], type="int64")],
names=["left", "right"])
list_array = pa.array([[1, 2, 3], [4, 5]], type=pa.list_(pa.int32()))
mystruct_array = pa.ExtensionArray.from_storage(MyStructType(),
struct_array)
mylist_array = pa.ExtensionArray.from_storage(
MyListType(list_array.type), list_array)
orig_table = pa.table({'structs': mystruct_array,
'lists': mylist_array})
filename = tmpdir / 'nested_extension_storage.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column('structs').type == mystruct_array.type
assert table.column('lists').type == mylist_array.type
assert table == orig_table
@pytest.mark.parquet
def test_parquet_nested_extension(tmpdir):
# Parquet support for extension types nested in struct or list
import pyarrow.parquet as pq
ext_type = IntegerType()
storage = pa.array([4, 5, 6, 7], type=pa.int64())
ext_array = pa.ExtensionArray.from_storage(ext_type, storage)
# Struct of extensions
struct_array = pa.StructArray.from_arrays(
[storage, ext_array],
names=['ints', 'exts'])
orig_table = pa.table({'structs': struct_array})
filename = tmpdir / 'struct_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == struct_array.type
assert table == orig_table
# List of extensions
list_array = pa.ListArray.from_arrays([0, 1, None, 3], ext_array)
orig_table = pa.table({'lists': list_array})
filename = tmpdir / 'list_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == list_array.type
assert table == orig_table
# Large list of extensions
list_array = pa.LargeListArray.from_arrays([0, 1, None, 3], ext_array)
orig_table = pa.table({'lists': list_array})
filename = tmpdir / 'list_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == list_array.type
assert table == orig_table
@pytest.mark.parquet
def test_parquet_extension_nested_in_extension(tmpdir):
# Parquet support for extension<list<extension>>
import pyarrow.parquet as pq
inner_ext_type = IntegerType()
inner_storage = pa.array([4, 5, 6, 7], type=pa.int64())
inner_ext_array = pa.ExtensionArray.from_storage(inner_ext_type,
inner_storage)
list_array = pa.ListArray.from_arrays([0, 1, None, 3], inner_ext_array)
mylist_array = pa.ExtensionArray.from_storage(
MyListType(list_array.type), list_array)
orig_table = pa.table({'lists': mylist_array})
filename = tmpdir / 'ext_of_list_of_ext.parquet'
pq.write_table(orig_table, filename)
table = pq.read_table(filename)
assert table.column(0).type == mylist_array.type
assert table == orig_table
def test_to_numpy():
period_type = PeriodType('D')
storage = pa.array([1, 2, 3, 4], pa.int64())
arr = pa.ExtensionArray.from_storage(period_type, storage)
expected = storage.to_numpy()
result = arr.to_numpy()
np.testing.assert_array_equal(result, expected)
result = np.asarray(arr)
np.testing.assert_array_equal(result, expected)
# chunked array
a1 = pa.chunked_array([arr, arr])
a2 = pa.chunked_array([arr, arr], type=period_type)
expected = np.hstack([expected, expected])
for charr in [a1, a2]:
assert charr.type == period_type
for result in [np.asarray(charr), charr.to_numpy()]:
assert result.dtype == np.int64
np.testing.assert_array_equal(result, expected)
# zero chunks
charr = pa.chunked_array([], type=period_type)
assert charr.type == period_type
for result in [np.asarray(charr), charr.to_numpy()]:
assert result.dtype == np.int64
np.testing.assert_array_equal(result, np.array([], dtype='int64'))
| apache-2.0 |
numenta-archive/htmresearch | projects/capybara/sandbox/sklearn/run_baseline.py | 9 | 2773 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
from sklearn.neural_network import MLPClassifier
from htmresearch.frameworks.classification.utils.traces import loadTraces
from utils import get_file_name, convert_to_sdrs
def load_sdrs(start_idx, end_idx, exp_name):
# Params
input_width = 2048 * 32
active_cells_weight = 0
predicted_active_cells_weight = 1
network_config = 'sp=True_tm=True_tp=False_SDRClassifier'
# load traces
file_name = get_file_name(exp_name, network_config)
traces = loadTraces(file_name)
num_records = len(traces['sensorValue'])
# start and end
if start_idx < 0:
start = num_records + start_idx
else:
start = start_idx
if end_idx < 0:
end = num_records + end_idx
else:
end = end_idx
# input data
sensor_values = traces['sensorValue'][start:end]
categories = traces['actualCategory'][start:end]
active_cells = traces['tmActiveCells'][start:end]
predicted_active_cells = traces['tmPredictedActiveCells'][start:end]
# generate sdrs to cluster
active_cells_sdrs = convert_to_sdrs(active_cells, input_width)
predicted_active_cells_sdrs = np.array(
convert_to_sdrs(predicted_active_cells, input_width))
sdrs = (float(active_cells_weight) * np.array(active_cells_sdrs) +
float(predicted_active_cells_weight) * predicted_active_cells_sdrs)
return sdrs, categories
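# Note (illustrative comment, not in the original script): with the weights
# used above (active_cells_weight=0, predicted_active_cells_weight=1) the
# returned matrix is simply the dense binary matrix of predicted-active TM
# cells. A toy example of the weighting, assuming two 4-bit SDRs:
#   active    = np.array([[1, 0, 1, 0]])
#   predicted = np.array([[0, 0, 1, 1]])
#   0.0 * active + 1.0 * predicted  ->  [[0., 0., 1., 1.]]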
def train_model(X, y):
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)
return clf
if __name__ == "__main__":
exp_name = '1x.40000.body_acc_x'
start_idx = 600
end_idx = 800
sdrs, categories = load_sdrs(start_idx, end_idx, exp_name)
clf = train_model(sdrs, categories)
predictions = clf.predict([sdrs[0], sdrs[1]])
print "Predictions: %s" % predictions
| agpl-3.0 |
tangyouze/tushare | tushare/datayes/idx.py | 17 | 1509 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) index data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Idx():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Idx(self, secID='', ticker='', field=''):
"""
        Get basic information on domestic and overseas indices, including index name, index code, publisher, publish date, base date, base point, etc.
"""
code, result = self.client.getData(vs.IDX%(secID, ticker, field))
return _ret_data(code, result)
def IdxCons(self, secID='', ticker='', intoDate='', isNew='', field=''):
"""
        Get the constituent composition of domestic and overseas indices, including constituent stock names, constituent codes, inclusion dates, removal dates, etc.
"""
code, result = self.client.getData(vs.IDXCONS%(secID, ticker, intoDate,
intoDate, isNew, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
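# Illustrative usage sketch (not part of the original module). It assumes a
# valid DataYes token has already been stored via tushare.util.upass, and the
# ticker values below are only examples.
if __name__ == '__main__':
    idx_api = Idx()
    print(idx_api.Idx(ticker='000001'))
    print(idx_api.IdxCons(ticker='000300', isNew='1'))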
| bsd-3-clause |
dotsdl/msmbuilder | msmbuilder/tests/test_commands.py | 1 | 7972 | from __future__ import print_function, division
import os
import sys
import json
import glob
import shlex
import itertools
import tempfile
import shutil
import subprocess
import numpy as np
import mdtraj as md
from mdtraj.testing import eq
from mdtraj.testing import get_fn as get_mdtraj_fn, skipif
from msmbuilder.utils import load
from msmbuilder.dataset import dataset
from msmbuilder.example_datasets import get_data_home
from msmbuilder.example_datasets.alanine_dipeptide import fetch_alanine_dipeptide
import sklearn.hmm
DATADIR = HMM = None
################################################################################
# Fixtures
################################################################################
def setup_module():
global DATADIR, HMM
DATADIR = tempfile.mkdtemp()
# 4 components and 3 features. Each feature is going to be the x, y, z
# coordinate of 1 atom
HMM = sklearn.hmm.GaussianHMM(n_components=4)
HMM.transmat_ = np.array([[0.9, 0.1, 0.0, 0.0],
[0.1, 0.7, 0.2, 0.0],
[0.0, 0.1, 0.8, 0.1],
[0.0, 0.1, 0.1, 0.8]])
HMM.means_ = np.array([[-10, -10, -10],
[-5, -5, -5],
[5, 5, 5],
[10, 10, 10]])
HMM.covars_ = np.array([[0.1, 0.1, 0.1],
[0.5, 0.5, 0.5],
[1, 1, 1],
[4, 4, 4]])
HMM.startprob_ = np.array([1, 1, 1, 1]) / 4.0
# get a 1 atom topology
topology = md.load(get_mdtraj_fn('native.pdb')).restrict_atoms([1]).topology
# generate the trajectories and save them to disk
for i in range(10):
d, s = HMM.sample(100)
t = md.Trajectory(xyz=d.reshape(len(d), 1, 3), topology=topology)
t.save(os.path.join(DATADIR, 'Trajectory%d.h5' % i))
fetch_alanine_dipeptide()
def teardown_module():
shutil.rmtree(DATADIR)
class tempdir(object):
def __enter__(self):
self._curdir = os.path.abspath(os.curdir)
self._tempdir = tempfile.mkdtemp()
os.chdir(self._tempdir)
def __exit__(self, *exc_info):
os.chdir(self._curdir)
shutil.rmtree(self._tempdir)
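# Usage note (added for clarity, not in the original file): each test below
# wraps its shell calls in ``with tempdir():`` so that any files written by
# the msmb commands land in a throw-away directory and the original working
# directory is restored (and the temporary one removed) afterwards.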
def shell(str):
    # Run the command with its stdout suppressed
if sys.platform == 'win32':
split = str.split()
else:
split = shlex.split(str)
print(split)
with open(os.devnull, 'w') as noout:
assert subprocess.call(split, stdout=noout) == 0
################################################################################
# Tests
################################################################################
def test_atomindices():
fn = get_mdtraj_fn('2EQQ.pdb')
t = md.load(fn)
with tempdir():
shell('msmb AtomIndices -o all.txt --all -a -p %s' % fn)
shell('msmb AtomIndices -o all-pairs.txt --all -d -p %s' % fn)
atoms = np.loadtxt('all.txt', int)
pairs = np.loadtxt('all-pairs.txt', int)
eq(t.n_atoms, len(atoms))
eq(int(t.n_atoms * (t.n_atoms-1) / 2), len(pairs))
with tempdir():
shell('msmb AtomIndices -o heavy.txt --heavy -a -p %s' % fn)
shell('msmb AtomIndices -o heavy-pairs.txt --heavy -d -p %s' % fn)
atoms = np.loadtxt('heavy.txt', int)
pairs = np.loadtxt('heavy-pairs.txt', int)
assert all(t.topology.atom(i).element.symbol != 'H' for i in atoms)
assert sum(1 for a in t.topology.atoms if a.element.symbol != 'H') == len(atoms)
eq(np.array(list(itertools.combinations(atoms, 2))), pairs)
with tempdir():
shell('msmb AtomIndices -o alpha.txt --alpha -a -p %s' % fn)
shell('msmb AtomIndices -o alpha-pairs.txt --alpha -d -p %s' % fn)
atoms = np.loadtxt('alpha.txt', int)
pairs = np.loadtxt('alpha-pairs.txt', int)
assert all(t.topology.atom(i).name == 'CA' for i in atoms)
assert sum(1 for a in t.topology.atoms if a.name == 'CA') == len(atoms)
eq(np.array(list(itertools.combinations(atoms, 2))), pairs)
with tempdir():
shell('msmb AtomIndices -o minimal.txt --minimal -a -p %s' % fn)
shell('msmb AtomIndices -o minimal-pairs.txt --minimal -d -p %s' % fn)
atoms = np.loadtxt('minimal.txt', int)
pairs = np.loadtxt('minimal-pairs.txt', int)
assert all(t.topology.atom(i).name in ['CA', 'CB', 'C', 'N' , 'O'] for i in atoms)
eq(np.array(list(itertools.combinations(atoms, 2))), pairs)
def test_superpose_featurizer():
with tempdir():
shell('msmb AtomIndices -o all.txt --all -a -p %s/alanine_dipeptide/ala2.pdb' % get_data_home()),
shell("msmb SuperposeFeaturizer --trjs '{data_home}/alanine_dipeptide/*.dcd'"
" --transformed distances --atom_indices all.txt"
" --reference_traj {data_home}/alanine_dipeptide/ala2.pdb"
" --top {data_home}/alanine_dipeptide/ala2.pdb".format(
data_home=get_data_home()))
ds = dataset('distances')
assert len(ds) == 10
assert ds[0].shape[1] == len(np.loadtxt('all.txt'))
print(ds.provenance)
def test_atom_pairs_featurizer():
with tempdir():
shell('msmb AtomIndices -o all.txt --all -d -p %s/alanine_dipeptide/ala2.pdb' % get_data_home()),
shell("msmb AtomPairsFeaturizer --trjs '{data_home}/alanine_dipeptide/*.dcd'"
" --transformed pairs --pair_indices all.txt"
" --top {data_home}/alanine_dipeptide/ala2.pdb".format(
data_home=get_data_home()))
ds = dataset('pairs')
assert len(ds) == 10
assert ds[0].shape[1] == len(np.loadtxt('all.txt')**2)
print(ds.provenance)
def test_transform_command_1():
with tempdir():
shell("msmb KCenters -i {data_home}/alanine_dipeptide/*.dcd "
"-o model.pkl --top {data_home}/alanine_dipeptide/ala2.pdb "
"--metric rmsd".format(data_home=get_data_home()))
shell("msmb TransformDataset -i {data_home}/alanine_dipeptide/*.dcd "
"-m model.pkl -t transformed.h5 --top "
"{data_home}/alanine_dipeptide/ala2.pdb".format(data_home=get_data_home()))
eq(dataset('transformed.h5')[0], load('model.pkl').labels_[0])
with tempdir():
shell("msmb KCenters -i {data_home}/alanine_dipeptide/trajectory_0.dcd "
"-o model.pkl --top {data_home}/alanine_dipeptide/ala2.pdb "
"--metric rmsd".format(data_home=get_data_home()))
def test_transform_command_2():
with tempdir():
shell("msmb KCenters -i {data_home}/alanine_dipeptide/trajectory_0.dcd "
"-o model.pkl --top {data_home}/alanine_dipeptide/ala2.pdb "
"--metric rmsd "
"--stride 2".format(data_home=get_data_home()))
def test_help():
shell('msmb -h')
def test_convert_chunked_project_1():
fetch_alanine_dipeptide()
with tempdir():
root = os.path.join(get_data_home(), 'alanine_dipeptide')
if sys.platform == 'win32':
pattern = "*.dcd"
else:
pattern = "'*.dcd'"
cmd = 'msmb ConvertChunkedProject out {root} --pattern {pattern} -t {root}/ala2.pdb'.format(root=root, pattern=pattern)
shell(cmd)
assert set(os.listdir('out')) == set(('traj-00000000.dcd', 'trajectories.jsonl'))
# check that out/traj-00000.dcd really has concatenated all of
# the input trajs
length = len(md.open('out/traj-00000000.dcd'))
assert length == sum(len(md.open(f)) for f in glob.glob('%s/*.dcd' % root))
with open('out/trajectories.jsonl') as f:
record = json.load(f)
assert set(record.keys()) == set(('filename', 'chunks'))
assert record['filename'] == 'traj-00000000.dcd'
assert sorted(glob.glob('%s/*.dcd' % root)) == record['chunks']
| lgpl-2.1 |
jstraub/easyRobo | rangeSensor.py | 1 | 4338 | # Copyright (c) 2012, Julian Straub <[email protected]>
# Licensed under the MIT license. See LICENSE.txt or
# http://www.opensource.org/licenses/mit-license.php
import numpy as np
import matplotlib.pyplot as plt
from aux import *
class RangeSensor:
def __init__(s,maxR,maxPhi,zCov):
s.maxR = maxR
s.maxPhi = maxPhi
s.zCov = zCov
def sense(s,world,x):
obsts = world.obst
obs_vis = []
for obst in obsts:
z = np.dot(s.zCov,np.resize(np.random.randn(2),(2,1)))
z[0] += dist(x[0:2],obst[0:2])
z[1] = ensureRange(z[1] + angle(x,obst[0:2]))
if z[0] <= s.maxR and np.abs(z[1]) < s.maxPhi/2.0:
plt.plot(obst[0],obst[1],'gx')
obs_vis.append(np.array([z[0],z[1],obst[2]]).ravel())
return obs_vis
def predict(s,x,l):
# predict measurement based on robot pose x and landmark position l
zPred = np.zeros((2,1))
dx, dy = l[0]-x[0], l[1]-x[1]
# print 'prediction: dx={}; dy={}; atan={}'.format(dx,dy,np.arctan2(dy,dx) )
zPred[0], zPred[1] = np.sqrt(dx*dx+dy*dy), ensureRange(np.arctan2(dy,dx) - x[2])
return zPred
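# Worked example (illustrative comment, not in the original file): for a robot
# pose x = (0, 0, 0) and a landmark at l = (3, 4), predict() returns
# range = sqrt(3**2 + 4**2) = 5 and bearing = atan2(4, 3) - 0 ~= 0.927 rad,
# i.e. the standard range-bearing measurement model with zero heading.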
class MultiModalRangeSensor(RangeSensor):
def __init__(s,maxR,maxPhi,zCov):
RangeSensor.__init__(s,maxR,maxPhi,zCov)
def sense(s,world,x):
obsts = world.obst
obs_vis = []
for obst in obsts:
z = np.dot(s.zCov,np.resize(np.random.randn(2),(2,1)))
z[0] += dist(x[0:2],obst[0:2])
if np.random.rand(1)[0] > 0.5: # flip a coin to obtain multimodal outcome
z[0] += 2
z[1] = ensureRange(z[1] + angle(x,obst[0:2]))
if z[0] <= s.maxR and np.abs(z[1]) < s.maxPhi/2.0:
plt.plot(obst[0],obst[1],'gx')
obs_vis.append(np.array([z[0],z[1],obst[2]]).ravel())
return obs_vis
class ScanerSensor(RangeSensor):
def __init__(s,maxR,maxPhi,dPhi,zCov):
s.dPhi = dPhi
RangeSensor.__init__(s,maxR,maxPhi,zCov)
def sense(s,world,x):
# occupancy grid o
o = np.zeros((np.ceil(s.maxR)*2,np.ceil(s.maxR)*2))
# number of observations
n = np.zeros((np.ceil(s.maxR)*2,np.ceil(s.maxR)*2))
# all directions of the scanner
phis = ensureRange(np.linspace(-s.maxPhi/2.0,s.maxPhi/2.0,s.maxPhi/s.dPhi)+x[2])
# x0: 0 in local coordinates of occupancy grid o
x0 = np.ones(3);
x0[0:2] = x[0:2,0] - np.floor(x[0:2,0]) + np.array([np.floor(s.maxR),np.floor(s.maxR)])
j0w = np.floor(x[0])
i0w = np.floor(x[1])
j0o = np.floor(s.maxR)
i0o = np.floor(s.maxR)
for phi in phis:
xEnd = np.ones(3)
xEnd[0] = x0[0] + np.cos(phi) * s.maxR
xEnd[1] = x0[1] + np.sin(phi) * s.maxR
l = np.cross(x0,xEnd)
# print '-- phi={}'.format(toDeg(phi))
for x in np.linspace(np.floor(x0[0]),np.floor(xEnd[0]), np.abs(np.floor(x0[0])-np.floor(xEnd[0]))+1.0 ):
a,b = [x,0.0,1.0],[x,1.0,1.0]
lx = np.cross(a,b)
y = np.cross(l,lx)
y /= y[2]
#plt.plot(x,y[1],'rx')
i = np.floor(y[1]-0.5)-i0o
j = np.floor(x-0.5)-j0o
if 0<=i+i0w and i+i0w<world.world.shape[1] and 0<=j+j0w and j+j0w<world.world.shape[0] and 0<=i+i0o and i+i0o<o.shape[1] and 0<=j+j0o and j+j0o<o.shape[0] :
# print 'x: ind={}'.format((i,j))
# print 'xo: ind={}'.format((i+i0o,j+j0o))
# print 'xw: ind={}'.format((i+i0w,j+j0w))
o[i+i0o,j+j0o] += world.world[int(i+i0w),int(j+j0w)]
n[i+i0o,j+j0o] += 1
if world.world[int(i+i0w),int(j+j0w)] > 0:
break
for y in np.linspace(np.floor(x0[1]),np.floor(xEnd[1]), np.abs(np.floor(x0[1])-np.floor(xEnd[1]))+1.0 ):
a,b = [0.0,y,1.0],[1.0,y,1.0]
ly = np.cross(a,b)
x = np.cross(l,ly)
x /= x[2]
#plt.plot(x[0],y,'rx')
i = np.floor(y-0.5) - i0o
j = np.floor(x[0]-0.5) - j0o
if 0<=i+i0w and i+i0w<world.world.shape[1] and 0<=j+j0w and j+j0w<world.world.shape[0] and 0<=i+i0o and i+i0o<o.shape[1] and 0<=j+j0o and j+j0o<o.shape[0] :
# print 'y: ind={}'.format((i,j))
# print 'yo: ind={}'.format((i+i0o,j+j0o))
# print 'yw: ind={}'.format((i+i0w,j+j0w))
o[i+i0o,j+j0o] += world.world[int(i+i0w),int(j+j0w)]
n[i+i0o,j+j0o] += 1
if world.world[int(i+i0w),int(j+j0w)] > 0:
break
# print o
return (o,n)
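# Note on the geometry above (explanatory comment, not in the original file):
# ScanerSensor.sense works in homogeneous coordinates, where np.cross of two
# points [x, y, 1] yields the line through them, and np.cross of two lines
# yields their intersection point up to scale (hence the division by the
# third component). A minimal numpy sketch of the idea:
#   l = np.cross([0.0, 0.0, 1.0], [2.0, 2.0, 1.0])   # line y = x
#   m = np.cross([0.0, 1.0, 1.0], [2.0, 1.0, 1.0])   # line y = 1
#   p = np.cross(l, m); p = p / p[2]                 # -> [1., 1., 1.]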
| mit |
kpespinosa/BuildingMachineLearningSystemsWithPython | ch02/figure4_5_sklearn.py | 22 | 2475 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from load import load_dataset
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
    'asymmetry coefficient',
'length of kernel groove',
]
def plot_decision(features, labels, num_neighbors=1):
'''Plots decision boundary for KNN
Parameters
----------
features : ndarray
labels : sequence
Returns
-------
fig : Matplotlib Figure
ax : Matplotlib Axes
'''
y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
X = np.linspace(x0, x1, 1000)
Y = np.linspace(y0, y1, 1000)
X, Y = np.meshgrid(X, Y)
model = KNeighborsClassifier(num_neighbors)
model.fit(features[:, (0,2)], labels)
C = model.predict(np.vstack([X.ravel(), Y.ravel()]).T).reshape(X.shape)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .7, .7), (.7, 1., .7), (.7, .7, 1.)])
else:
cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
fig,ax = plt.subplots()
ax.set_xlim(x0, x1)
ax.set_ylim(y0, y1)
ax.set_xlabel(feature_names[0])
ax.set_ylabel(feature_names[2])
ax.pcolormesh(X, Y, C, cmap=cmap)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .0, .0), (.1, .6, .1), (.0, .0, 1.)])
ax.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
else:
for lab, ma in zip(range(3), "Do^"):
ax.plot(features[labels == lab, 0], features[
labels == lab, 2], ma, c=(1., 1., 1.), ms=6)
return fig,ax
features, labels = load_dataset('seeds')
names = sorted(set(labels))
labels = np.array([names.index(ell) for ell in labels])
fig,ax = plot_decision(features, labels)
fig.tight_layout()
fig.savefig('figure4sklearn.png')
features -= features.mean(0)
features /= features.std(0)
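# Comment added for clarity (not in the original script): the z-scoring above
# (subtract the mean, divide by the standard deviation of each column) puts
# all features on a comparable scale, so no single feature dominates the
# Euclidean distance used by the KNN classifier in the following figures.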
fig,ax = plot_decision(features, labels)
fig.tight_layout()
fig.savefig('figure5sklearn.png')
fig,ax = plot_decision(features, labels, 11)
fig.tight_layout()
fig.savefig('figure5sklearn_with_11_neighbors.png')
| mit |
rafaelvalle/MDI | nnet_full_bin_scaled.py | 1 | 5732 | # Code adapted from https://github.com/Newmu/Theano-Tutorials
import sys, time, os
from ntpath import basename
from os.path import splitext
from itertools import product
import cPickle as pickle
import theano
from theano import tensor as T
import numpy as np
from sklearn.cross_validation import KFold
from params import feats_train_folder, feats_test_folder
def set_trace():
from IPython.core.debugger import Pdb
import sys
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def sgd(cost, params, gamma):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
updates.append([p, p - g * gamma])
return updates
def model(X, w_h, w_o):
h = T.nnet.sigmoid(T.dot(X, w_h))
pyx = T.nnet.softmax(T.dot(h, w_o))
return pyx
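# Note (illustrative comment, not in the original script): the labels below
# are one-hot encoded with np.eye(2)[labels]. For example, assuming binary
# labels np.array([0, 1, 1]):
#   np.eye(2)[np.array([0, 1, 1])]  ->  [[1., 0.], [0., 1.], [0., 1.]]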
# train on every perturbed dataset
filepaths = np.loadtxt("include_data.csv", dtype=object, delimiter=",")
for (include, train_filename, test_filename) in filepaths:
if include == '1':
print '\nExecuting {}'.format(train_filename)
# Load training and test sets
x_train = np.load(os.path.join(feats_train_folder, train_filename))
y_train = (np.eye(2)[x_train[:, -1].astype(int)])
x_test = np.load(os.path.join(feats_test_folder, test_filename))
y_test = (np.eye(2)[x_test[:, -1].astype(int)])
# remove label column from x_train and x_test
x_train = x_train[:,:-1]
x_test = x_test[:,:-1]
# Network topology
n_inputs = x_train.shape[1]
n_outputs = len(np.unique(y_train))
# Cross-validation and Neural Net parameters
# load params from best model
#params_dict = pickle.load(open('params_dict.pkl', 'rb'))
alphas = (9,)
gammas = (0.1,)
batch_sizes = (32,)
max_epoch = 1
# Dictionary to store results
results_dict = {}
params_matrix = np.array([x for x in product(alphas, gammas, batch_sizes)])
params_matrix = np.column_stack((params_matrix,
np.zeros(params_matrix.shape[0]),
np.zeros(params_matrix.shape[0]),
np.zeros(params_matrix.shape[0])))
for param_idx in xrange(params_matrix.shape[0]):
alpha = params_matrix[param_idx, 0]
gamma = params_matrix[param_idx, 1]
batch_size = int(params_matrix[param_idx, 2])
            # Rule-of-thumb hidden layer size; cast to int so it can be used as a weight-matrix shape
            n_hidden = int(x_train.shape[0] / (alpha * (n_inputs + n_outputs)))
# Initialize weights
w_h = init_weights((n_inputs, n_hidden))
w_o = init_weights((n_hidden, n_outputs))
# Initialize NN classifier
X = T.fmatrix()
Y = T.fmatrix()
py_x = model(X, w_h, w_o)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))
params = [w_h, w_o]
updates = sgd(cost, params, gamma=gamma)
train = theano.function(inputs=[X, Y],
outputs=cost,
updates=updates,
allow_input_downcast=True)
predict = theano.function(inputs=[X],
outputs=y_x,
allow_input_downcast=True)
# Test on validation set
model_str = 'alpha {} gamma {} batch size {}'.format(alpha,
gamma,
batch_size)
print model_str
error_rates = []
test_costs = []
running_time = []
start_time = time.time()
for i in range(max_epoch):
for start, end in zip(range(0, len(x_train),
batch_size),
range(batch_size, len(x_train),
batch_size)):
test_cost = train(x_train[start:end],
y_train[start:end])
error_rate = 1 - np.mean(np.argmax(y_train, axis=1) == predict(x_train))
                if max_epoch >= 4 and (i % (max_epoch // 4)) == 0:
                    print 'epoch {}, error rate {}, cost {}'.format(i + 1,
                                                                    error_rate,
                                                                    test_cost)
error_rates.append(error_rate)
test_costs.append(test_cost)
running_time.append(np.around((time.time() - start_time) / 60., 1))
params_matrix[param_idx, 3] = np.mean(error_rate)
params_matrix[param_idx, 4] = np.mean(test_cost)
params_matrix[param_idx, 5] = np.mean(running_time)
print 'alpha {} gamma {} batchsize {} error rate {} test cost {} running time {}'.format(params_matrix[param_idx,0],
params_matrix[param_idx,1],
params_matrix[param_idx,2],
params_matrix[param_idx,3],
params_matrix[param_idx,4],
params_matrix[param_idx,5])
error_rate_test = 1 - np.mean(np.argmax(y_test, axis=1) == predict(x_test))
print 'Test Error rate : {}'.format(error_rate_test)
# Save params matrix to disk
        params_matrix.dump('{}_results.np'.format(train_filename))
| mit |
nigroup/pypet | examples/example_11_large_scale_brian_simulation/clusternet.py | 1 | 30137 | """Module to run the clustered Neural Network Simulations as in Litwin-Kumar & Doiron 2012"""
__author__ = 'Robert Meyer'
import os
import numpy as np
import matplotlib.pyplot as plt
from pypet.trajectory import Trajectory
from pypet.brian.parameter import BrianParameter, BrianMonitorResult
from pypet.brian.network import NetworkComponent, NetworkRunner, NetworkAnalyser
from brian.stdunits import ms
from brian import NeuronGroup, rand, Connection, Equations, Network, SpikeMonitor, second, \
raster_plot, show, StateMonitor, clear, reinit_default_clock
def _explored_parameters_in_group(traj, group_node):
"""Checks if one the parameters in `group_node` is explored.
:param traj: Trajectory container
:param group_node: Group node
:return: `True` or `False`
"""
explored = False
for param in traj.f_get_explored_parameters():
if param in group_node:
explored = True
break
return explored
class CNNeuronGroup(NetworkComponent):
"""Class to create neuron groups.
Creates two groups of excitatory and inhibitory neurons.
"""
@staticmethod
def add_parameters(traj):
"""Adds all neuron group parameters to `traj`."""
assert(isinstance(traj,Trajectory))
scale = traj.simulation.scale
traj.v_standard_parameter = BrianParameter
model_eqs = '''dV/dt= 1.0/tau_POST * (mu - V) + I_syn : 1
mu : 1
I_syn = - I_syn_i + I_syn_e : Hz
'''
conn_eqs = '''I_syn_PRE = x_PRE/(tau2_PRE-tau1_PRE) : Hz
dx_PRE/dt = -(normalization_PRE*y_PRE+x_PRE)*invtau1_PRE : 1
dy_PRE/dt = -y_PRE*invtau2_PRE : 1
'''
traj.f_add_parameter('model.eqs', model_eqs,
comment='The differential equation for the neuron model')
traj.f_add_parameter('model.synaptic.eqs', conn_eqs,
comment='The differential equation for the synapses. '
'PRE will be replaced by `i` or `e` depending '
'on the source population')
traj.f_add_parameter('model.synaptic.tau1', 1*ms, comment = 'The decay time')
traj.f_add_parameter('model.synaptic.tau2_e', 3*ms, comment = 'The rise time, excitatory')
traj.f_add_parameter('model.synaptic.tau2_i', 2*ms, comment = 'The rise time, inhibitory')
traj.f_add_parameter('model.V_th', 1.0, comment = "Threshold value")
traj.f_add_parameter('model.reset_func', 'V=0.0',
comment = "String representation of reset function")
traj.f_add_parameter('model.refractory', 5*ms, comment = "Absolute refractory period")
traj.f_add_parameter('model.N_e', int(4000*scale), comment = "Amount of excitatory neurons")
traj.f_add_parameter('model.N_i', int(1000*scale), comment = "Amount of inhibitory neurons")
traj.f_add_parameter('model.tau_e', 15*ms, comment = "Membrane time constant, excitatory")
traj.f_add_parameter('model.tau_i', 10*ms, comment = "Membrane time constant, inhibitory")
traj.f_add_parameter('model.mu_e_min', 1.1, comment = "Lower bound for bias, excitatory")
traj.f_add_parameter('model.mu_e_max', 1.2, comment = "Upper bound for bias, excitatory")
traj.f_add_parameter('model.mu_i_min', 1.0, comment = "Lower bound for bias, inhibitory")
traj.f_add_parameter('model.mu_i_max', 1.05, comment = "Upper bound for bias, inhibitory")
@staticmethod
def _build_model_eqs(traj):
"""Computes model equations for the excitatory and inhibitory population.
Equation objects are created by fusing `model.eqs` and `model.synaptic.eqs`
and replacing `PRE` by `i` (for inhibitory) or `e` (for excitatory) depending
on the type of population.
:return: Dictionary with 'i' equation object for inhibitory neurons and 'e' for excitatory
"""
model_eqs = traj.model.eqs
post_eqs={}
for name_post in ['i','e']:
variables_dict ={}
new_model_eqs=model_eqs.replace('POST', name_post)
for name_pre in ['i', 'e']:
conn_eqs = traj.model.synaptic.eqs
new_conn_eqs = conn_eqs.replace('PRE', name_pre)
new_model_eqs += new_conn_eqs
tau1 = traj.model.synaptic['tau1']
tau2 = traj.model.synaptic['tau2_'+name_pre]
normalization = (tau1-tau2) / tau2
invtau1=1.0/tau1
invtau2 = 1.0/tau2
variables_dict['invtau1_'+name_pre] = invtau1
variables_dict['invtau2_'+name_pre] = invtau2
variables_dict['normalization_'+name_pre] = normalization
variables_dict['tau1_'+name_pre] = tau1
variables_dict['tau2_'+name_pre] = tau2
variables_dict['tau_'+name_post] = traj.model['tau_'+name_post]
post_eqs[name_post] = Equations(new_model_eqs, **variables_dict)
return post_eqs
def pre_build(self, traj, brian_list, network_dict):
"""Pre-builds the neuron groups.
Pre-build is only performed if none of the
relevant parameters is explored.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Inhibitory neuron group
Excitatory neuron group
:param network_dict:
Dictionary of elements shared among the components
Adds:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
"""
self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.model)
if self._pre_build:
self._build_model(traj, brian_list, network_dict)
def build(self, traj, brian_list, network_dict):
"""Builds the neuron groups.
Build is only performed if neuron group was not
pre-build before.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Inhibitory neuron group
Excitatory neuron group
:param network_dict:
Dictionary of elements shared among the components
Adds:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
"""
if not hasattr(self, '_pre_build') or not self._pre_build:
self._build_model(traj, brian_list, network_dict)
def _build_model(self, traj, brian_list, network_dict):
"""Builds the neuron groups from `traj`.
Adds the neuron groups to `brian_list` and `network_dict`.
"""
model = traj.parameters.model
# Create the equations for both models
eqs_dict = self._build_model_eqs(traj)
# Create inhibitory neurons
eqs_i = eqs_dict['i']
neurons_i = NeuronGroup(N=model.N_i,
model = eqs_i,
threshold=model.V_th,
reset=model.reset_func,
refractory=model.refractory,
freeze=True,
compile=True,
method='Euler')
# Create excitatory neurons
eqs_e = eqs_dict['e']
neurons_e = NeuronGroup(N=model.N_e,
model = eqs_e,
threshold=model.V_th,
reset=model.reset_func,
refractory=model.refractory,
freeze=True,
compile=True,
method='Euler')
# Set the bias terms
neurons_e.mu =rand(model.N_e) * (model.mu_e_max - model.mu_e_min) + model.mu_e_min
neurons_i.mu =rand(model.N_i) * (model.mu_i_max - model.mu_i_min) + model.mu_i_min
# Set initial membrane potentials
neurons_e.V = rand(model.N_e)
neurons_i.V = rand(model.N_i)
# Add both groups to the `brian_list` and the `network_dict`
brian_list.append(neurons_i)
brian_list.append(neurons_e)
network_dict['neurons_e']=neurons_e
network_dict['neurons_i']=neurons_i
class CNConnections(NetworkComponent):
"""Class to connect neuron groups.
In case of no clustering `R_ee=1,0` there are 4 connection instances (i->i, i->e, e->i, e->e).
Otherwise there are 3 + 3*N_c-2 connections with N_c the number of clusters
(i->i, i->e, e->i, N_c conns within cluster, 2*N_c-2 connections from cluster to outside).
"""
@staticmethod
def add_parameters(traj):
"""Adds all neuron group parameters to `traj`."""
assert(isinstance(traj,Trajectory))
traj.v_standard_parameter = BrianParameter
scale = traj.simulation.scale
traj.f_add_parameter('connections.R_ee', 1.0, comment='Scaling factor for clustering')
traj.f_add_parameter('connections.clustersize_e', 80, comment='Size of a cluster')
traj.f_add_parameter('connections.strength_factor', 1.9,
comment='Factor for scaling cluster weights')
traj.f_add_parameter('connections.p_ii', 0.5,
comment='Connection probability from inhibitory to inhibitory' )
traj.f_add_parameter('connections.p_ei', 0.5,
comment='Connection probability from inhibitory to excitatory' )
traj.f_add_parameter('connections.p_ie', 0.5,
comment='Connection probability from excitatory to inhibitory' )
traj.f_add_parameter('connections.p_ee', 0.2,
comment='Connection probability from excitatory to excitatory' )
traj.f_add_parameter('connections.J_ii', 0.057/np.sqrt(scale),
comment='Connection strength from inhibitory to inhibitory')
traj.f_add_parameter('connections.J_ei', 0.045/np.sqrt(scale),
                             comment='Connection strength from inhibitory to excitatory')
traj.f_add_parameter('connections.J_ie', 0.014/np.sqrt(scale),
comment='Connection strength from excitatory to inhibitory')
traj.f_add_parameter('connections.J_ee', 0.024/np.sqrt(scale),
comment='Connection strength from excitatory to excitatory')
def pre_build(self, traj, brian_list, network_dict):
"""Pre-builds the connections.
Pre-build is only performed if none of the
relevant parameters is explored and the relevant neuron groups
exist.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Connections, amount depends on clustering
:param network_dict:
Dictionary of elements shared among the components
Expects:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
Adds:
Connections, amount depends on clustering
"""
self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.connections)
self._pre_build = (self._pre_build and 'neurons_i' in network_dict and
'neurons_e' in network_dict)
if self._pre_build:
self._build_connections(traj, brian_list, network_dict)
def build(self, traj, brian_list, network_dict):
"""Builds the connections.
Build is only performed if connections have not
been pre-build.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Connections, amount depends on clustering
:param network_dict:
Dictionary of elements shared among the components
Expects:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
Adds:
Connections, amount depends on clustering
"""
if not hasattr(self, '_pre_build') or not self._pre_build:
self._build_connections(traj, brian_list, network_dict)
def _build_connections(self, traj, brian_list, network_dict):
"""Connects neuron groups `neurons_i` and `neurons_e`.
Adds all connections to `brian_list` and adds a list of connections
with the key 'connections' to the `network_dict`.
"""
connections = traj.connections
neurons_i = network_dict['neurons_i']
neurons_e = network_dict['neurons_e']
print 'Connecting ii'
self.conn_ii = Connection(neurons_i,neurons_i, state='y_i',
weight=connections.J_ii,
sparseness=connections.p_ii)
print 'Connecting ei'
self.conn_ei = Connection(neurons_i,neurons_e,state='y_i',
weight=connections.J_ei,
sparseness=connections.p_ei)
print 'Connecting ie'
self.conn_ie = Connection(neurons_e,neurons_i,state='y_e',
weight=connections.J_ie,
sparseness=connections.p_ie)
conns_list = [self.conn_ii, self.conn_ei, self.conn_ie]
if connections.R_ee > 1.0:
# If we come here we want to create clusters
cluster_list=[]
cluster_conns_list=[]
model=traj.model
# Compute the number of clusters
clusters = model.N_e/connections.clustersize_e
traj.f_add_derived_parameter('connections.clusters', clusters, comment='Number of clusters')
# Compute outgoing connection probability
p_out = (connections.p_ee*model.N_e) / \
(connections.R_ee*connections.clustersize_e+model.N_e- connections.clustersize_e)
# Compute within cluster connection probability
p_in = p_out * connections.R_ee
# We keep these derived parameters
traj.f_add_derived_parameter('connections.p_ee_in', p_in ,
comment='Connection prob within cluster')
traj.f_add_derived_parameter('connections.p_ee_out', p_out ,
comment='Connection prob to outside of cluster')
low_index = 0
high_index = connections.clustersize_e
# Iterate through cluster and connect within clusters and to the rest of the neurons
for irun in range(clusters):
cluster = neurons_e[low_index:high_index]
# Connections within cluster
print 'Connecting ee cluster #%d of %d' % (irun, clusters)
conn = Connection(cluster,cluster,state='y_e',
weight=connections.J_ee*connections.strength_factor,
sparseness=p_in)
cluster_conns_list.append(conn)
# Connections reaching out from cluster
# A cluster consists of `clustersize_e` neurons with consecutive indices.
# So usually the outside world consists of two groups, neurons with lower
# indices than the cluster indices, and neurons with higher indices.
# Only the clusters at the index boundaries project to neurons with only either
# lower or higher indices
if low_index > 0:
rest_low = neurons_e[0:low_index]
print 'Connecting cluster with other neurons of lower index'
low_conn = Connection(cluster,rest_low,state='y_e',
weight=connections.J_ee,
sparseness=p_out)
cluster_conns_list.append(low_conn)
if high_index < model.N_e:
rest_high = neurons_e[high_index:model.N_e]
print 'Connecting cluster with other neurons of higher index'
high_conn = Connection(cluster,rest_high,state='y_e',
weight=connections.J_ee,
sparseness=p_out)
cluster_conns_list.append(high_conn)
low_index=high_index
high_index+=connections.clustersize_e
self.cluster_conns=cluster_conns_list
conns_list+=cluster_conns_list
else:
# Here we don't cluster and connection probabilities are homogeneous
            print 'Connecting ee'
self.conn_ee = Connection(neurons_e,neurons_e,state='y_e',
weight=connections.J_ee,
sparseness=connections.p_ee)
conns_list.append(self.conn_ee)
# Add the connections to the `brian_list` and the network dict
brian_list.extend(conns_list)
network_dict['connections'] = conns_list
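# Worked example for the clustered connectivity above (added for illustration,
# using the default parameter values from add_parameters at scale 1 and an
# assumed R_ee = 2.5): with p_ee = 0.2, N_e = 4000 and clustersize_e = 80,
#   p_out = 0.2 * 4000 / (2.5 * 80 + 4000 - 80) = 800 / 4120 ~= 0.194
#   p_in  = p_out * 2.5                                       ~= 0.486
# so neurons within a cluster are about 2.5 times more likely to be connected
# than neurons in different clusters, while the expected number of excitatory
# inputs per neuron stays close to the unclustered case (~ p_ee * N_e = 800).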
class CNNetworkRunner(NetworkRunner):
"""Runs the network experiments.
Adds two BrianParameters, one for an initial run, and one for a run
that is actually measured.
"""
def add_parameters(self, traj):
"""Adds all necessary parameters to `traj` container."""
par= traj.f_add_parameter(BrianParameter,'simulation.durations.initial_run', 500*ms,
comment='Initialisation run for more realistic '
'measurement conditions.')
par.v_annotations.order=0
par=traj.f_add_parameter(BrianParameter,'simulation.durations.measurement_run', 2000*ms,
comment='Measurement run that is considered for '
'statistical evaluation')
par.v_annotations.order=1
class CNFanoFactorComputer(NetworkAnalyser):
"""Computes the FanoFactor if the MonitorAnalyser has extracted data"""
def add_parameters(self, traj):
traj.f_add_parameter('analysis.statistics.time_window', 0.1 , 'Time window for FF computation')
traj.f_add_parameter('analysis.statistics.neuron_ids', tuple(range(500)),
comment= 'Neurons to be taken into account to compute FF')
@staticmethod
def _compute_fano_factor(spike_table, neuron_id, time_window, start_time, end_time):
"""Computes Fano Factor for one neuron.
:param spike_table:
DataFrame containing the spiketimes of all neurons
:param neuron_id:
Index of neuron for which FF is computed
:param time_window:
Length of the consecutive time windows to compute the FF
:param start_time:
Start time of measurement to consider
:param end_time:
End time of measurement to consider
:return:
Fano Factor (float) or
returns 0 if mean firing activity is 0.
"""
assert(end_time >= start_time+time_window)
# Number of time bins
bins = (end_time-start_time)/float(time_window)
bins = int(np.floor(bins))
# Arrays for binning of spike counts
binned_spikes = np.zeros(bins)
# DataFrame only containing spikes of the particular neuron
spike_table_neuron = spike_table[spike_table.neuron==neuron_id]
for bin in range(bins):
# We iterate over the bins to calculate the spike counts
lower_time = start_time+time_window*bin
upper_time = start_time+time_window*(bin+1)
# Filter the spikes
spike_table_interval = spike_table_neuron[spike_table_neuron.spiketimes >= lower_time]
spike_table_interval = spike_table_interval[spike_table_interval.spiketimes < upper_time]
# Add count to bins
spikes = len(spike_table_interval)
binned_spikes[bin]=spikes
var = np.var(binned_spikes)
avg = np.mean(binned_spikes)
if avg > 0:
return var/float(avg)
else:
return 0
@staticmethod
def _compute_mean_fano_factor( neuron_ids, spike_table, time_window, start_time, end_time):
"""Computes average Fano Factor over many neurons.
:param neuron_ids:
List of neuron indices to average over
:param spike_table:
DataFrame containing the spiketimes of all neurons
:param time_window:
Length of the consecutive time windows to compute the FF
:param start_time:
Start time of measurement to consider
:param end_time:
End time of measurement to consider
:return:
Average fano factor
"""
ffs = np.zeros(len(neuron_ids))
for idx, neuron_id in enumerate(neuron_ids):
ff=CNFanoFactorComputer._compute_fano_factor(
spike_table, neuron_id, time_window, start_time, end_time)
ffs[idx]=ff
mean_ff = np.mean(ffs)
return mean_ff
def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
"""Calculates average Fano Factor of a network.
:param traj:
Trajectory container
Expects:
`results.monitors.spikes_e`: Data from SpikeMonitor for excitatory neurons
Adds:
`results.statistics.mean_fano_factor`: Average Fano Factor
:param network:
The BRIAN network
:param current_subrun:
BrianParameter
:param subrun_list:
Upcoming subruns, analysis is only performed if subruns is empty,
aka the final subrun has finished.
:param network_dict:
Dictionary of items shared among componetns
"""
#Check if we finished all subruns
if len(subrun_list)==0:
spikes_e = traj.results.monitors.spikes_e
time_window = traj.parameters.analysis.statistics.time_window
start_time = float(traj.parameters.simulation.durations.initial_run)
end_time = start_time+float(traj.parameters.simulation.durations.measurement_run)
neuron_ids = traj.parameters.analysis.statistics.neuron_ids
mean_ff = self._compute_mean_fano_factor(
neuron_ids, spikes_e.spikes, time_window, start_time, end_time)
traj.f_add_result('statistics.mean_fano_factor', mean_ff, comment='Average Fano '
'Factor over all '
'exc neurons')
print 'R_ee: %f, Mean FF: %f' % (traj.R_ee, mean_ff)
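# Worked example for the Fano factor computation above (illustrative comment,
# not in the original module): if a neuron produces the binned spike counts
# [2, 0, 3, 1], then mean = 1.5, variance = 1.25 and the Fano factor is
# 1.25 / 1.5 ~= 0.83; a value of 1 would correspond to Poisson-like counts.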
class CNMonitorAnalysis(NetworkAnalyser):
"""Adds monitors for recoding and plots the monitor output."""
@staticmethod
def add_parameters( traj):
traj.f_add_parameter('analysis.neuron_records',(0,1,100,101),
comment='Neuron indices to record from.')
traj.f_add_parameter('analysis.plot_folder',
os.path.join('experiments', 'example_11', 'PLOTS'),
comment='Folder for plots')
traj.f_add_parameter('analysis.show_plots', 0, comment='Whether to show plots.')
traj.f_add_parameter('analysis.make_plots', 1, comment='Whether to make plots.')
def add_to_network(self, traj, network, current_subrun, subrun_list, network_dict):
"""Adds monitors to the network if the measurement run is carried out.
:param traj: Trajectory container
:param network: The BRIAN network
:param current_subrun: BrianParameter
:param subrun_list: List of coming subrun_list
:param network_dict:
Dictionary of items shared among the components
Expects:
'neurons_e': Excitatory neuron group
Adds:
'monitors': List of monitors
0. SpikeMonitor of excitatory neurons
1. StateMonitor of membrane potential of some excitatory neurons
(specified in `neuron_records`)
2. StateMonitor of excitatory synaptic currents of some excitatory neurons
3. State monitor of inhibitory currents of some excitatory neurons
"""
if current_subrun.v_annotations.order == 1:
self._add_monitors(traj, network, network_dict)
def _add_monitors(self, traj, network, network_dict):
"""Adds monitors to the network"""
neurons_e = network_dict['neurons_e']
monitor_list = []
# Spiketimes
self.spike_monitor = SpikeMonitor(neurons_e, delay=0*ms)
monitor_list.append(self.spike_monitor)
# Membrane Potential
self.V_monitor = StateMonitor(neurons_e,'V',
record=list(traj.neuron_records))
monitor_list.append(self.V_monitor)
# Exc. syn .Current
self.I_syn_e_monitor = StateMonitor(neurons_e, 'I_syn_e',
record=list(traj.neuron_records))
monitor_list.append(self.I_syn_e_monitor)
# Inh. syn. Current
self.I_syn_i_monitor = StateMonitor(neurons_e, 'I_syn_i',
record=list(traj.neuron_records))
monitor_list.append(self.I_syn_i_monitor)
# Add monitors to network and dictionary
network.add(*monitor_list)
network_dict['monitors'] = monitor_list
def _make_folder(self, traj):
"""Makes a subfolder for plots.
:return: Path name to print folder
"""
print_folder = os.path.join(traj.analysis.plot_folder,
traj.v_name, traj.v_crun)
print_folder = os.path.abspath(print_folder)
if not os.path.isdir(print_folder):
os.makedirs(print_folder)
return print_folder
def _plot_result(self, traj, result_name):
"""Plots a state variable graph for several neurons into one figure"""
result = traj.f_get(result_name)
values = result.values
varname = result.varname
unit = result.unit
times = result.times
record = result.record
for idx, celia_neuron in enumerate(record):
plt.subplot(len(record), 1, idx+1)
plt.plot(times, values[idx,:])
if idx==0:
plt.title('%s' % varname)
if idx==1:
plt.ylabel('%s/%s' % ( varname,unit))
if idx == len(record)-1:
plt.xlabel('t/ms')
def _print_graphs(self, traj):
"""Makes some plots and stores them into subfolders"""
print_folder = self._make_folder(traj)
# If we use BRIAN's own raster_plot functionality we
        # need to use the SpikeMonitor directly
raster_plot(self.spike_monitor, newfigure=True, xlabel='t', ylabel='Exc. Neurons',
title='Spike Raster Plot')
filename=os.path.join(print_folder,'spike.png')
print 'Current plot: %s ' % filename
plt.savefig(filename)
plt.close()
fig=plt.figure()
self._plot_result(traj, 'monitors.V')
filename=os.path.join(print_folder,'V.png')
print 'Current plot: %s ' % filename
fig.savefig(filename)
plt.close()
plt.figure()
self._plot_result(traj, 'monitors.I_syn_e')
filename=os.path.join(print_folder,'I_syn_e.png')
print 'Current plot: %s ' % filename
plt.savefig(filename)
plt.close()
plt.figure()
self._plot_result(traj, 'monitors.I_syn_i')
filename=os.path.join(print_folder,'I_syn_i.png')
print 'Current plot: %s ' % filename
plt.savefig(filename)
plt.close()
if not traj.analysis.show_plots:
plt.close('all')
else:
plt.show()
def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
"""Extracts monitor data and plots.
Data extraction is done if all subruns have been completed,
i.e. `len(subrun_list)==0`
First, extracts results from the monitors and stores them into `traj`.
Next, uses the extracted data for plots.
:param traj:
Trajectory container
Adds:
Data from monitors
:param network: The BRIAN network
:param current_subrun: BrianParameter
:param subrun_list: List of coming subruns
:param network_dict: Dictionary of items shared among all components
"""
if len(subrun_list)==0:
traj.f_add_result(BrianMonitorResult, 'monitors.spikes_e', self.spike_monitor,
comment = 'The spiketimes of the excitatory population')
traj.f_add_result(BrianMonitorResult, 'monitors.V', self.V_monitor,
comment = 'Membrane voltage of four neurons from 2 clusters')
traj.f_add_result(BrianMonitorResult, 'monitors.I_syn_e', self.I_syn_e_monitor,
comment = 'I_syn_e of four neurons from 2 clusters')
traj.f_add_result(BrianMonitorResult, 'monitors.I_syn_i', self.I_syn_i_monitor,
comment = 'I_syn_i of four neurons from 2 clusters')
print 'Plotting'
if traj.parameters.analysis.make_plots:
self._print_graphs(traj)
| bsd-3-clause |
Connor-R/nba_shot_charts | charting/helper_charting.py | 1 | 21226 | # A set of helper functions for the nba_shot_chart codebase
import urllib
import os
import csv
import sys
import math
import pandas as pd
import numpy as np
import matplotlib as mpb
import matplotlib.pyplot as plt
from matplotlib import offsetbox as osb
from matplotlib.patches import RegularPolygon
from datetime import date
from py_data_getter import data_getter
from py_db import db
import helper_data
db = db('nba_shots')
# setting the color map we want to use
mymap = mpb.cm.OrRd
np.seterr(divide='ignore', invalid='ignore')
#Drawing the outline of the court
#Most of this code was recycled from Savvas Tjortjoglou [http://savvastjortjoglou.com]
def draw_court(ax=None, color='white', lw=2, outer_lines=False):
from matplotlib.patches import Circle, Rectangle, Arc
if ax is None:
ax = plt.gca()
hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)
backboard = Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color)
outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
fill=False)
inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
fill=False)
top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
linewidth=lw, color=color, fill=False)
bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color, linestyle='dashed')
restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
color=color)
corner_three_a = Rectangle((-220, -50.0), 0, 140, linewidth=lw,
color=color)
corner_three_b = Rectangle((219.75, -50.0), 0, 140, linewidth=lw, color=color)
three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
color=color)
center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color)
center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
linewidth=lw, color=color)
court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
bottom_free_throw, restricted, corner_three_a,
corner_three_b, three_arc, center_outer_arc,
center_inner_arc]
if outer_lines:
outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw,
color=color, fill=False)
court_elements.append(outer_lines)
for element in court_elements:
ax.add_patch(element)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
return ax
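# --- Illustrative usage sketch (not part of the original module) ---
# Draws the empty half court defined above on a standalone figure. Only names
# already defined in this module are assumed; the figure size and the dark
# facecolor are arbitrary choices made for this sketch.
def _demo_draw_court():
    fig = plt.figure(figsize=(6, 6))
    ax = fig.gca()
    ax.set_facecolor('#152535')
    draw_court(ax=ax, color='white', lw=2, outer_lines=True)
    ax.set_xlim(-250, 250)
    ax.set_ylim(370, -30)  # flip the y-axis so the hoop sits at the top
    return fig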
# we set gridNum to be 30 (basically a grid of 30x30 hexagons)
def shooting_plot(dataType, path, shot_df, _id, season_id, _title, _name, isCareer=False, min_year = 0, max_year = 0, plot_size=(24,24), gridNum=30):
# get the shooting percentage and number of shots for all bins, all shots, and a subset of some shots
(ShootingPctLocs, shotNumber), shot_count_all = find_shootingPcts(shot_df, gridNum)
all_efg_percentile = float(helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'EFG_Percentile'))
color_efg = max(min((all_efg_percentile/100),1.0),0.0)
paa = float(helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'paa'))
# set the figure for drawing on
fig = plt.figure(figsize=(24,24))
# cmap will be used as our color map going forward
cmap = mymap
# where to place the plot within the figure, first two attributes are the x_min and y_min, and the next 2 are the % of the figure that is covered in the x_direction and y_direction (so in this case, our plot will go from (0.05, 0.15) at the bottom left, and stretches to (0.85,0.925) at the top right)
ax = plt.axes([0.05, 0.15, 0.81, 0.775])
# setting the background color using a hex code (http://www.rapidtables.com/web/color/RGB_Color.htm)
# ax.set_facecolor('#0C232E')
ax.set_facecolor('#152535')
# draw the outline of the court
draw_court(outer_lines=False)
# specify the dimensions of the court we draw
plt.xlim(-250,250)
plt.ylim(370, -30)
# drawing the bottom right image
zoom = 1 # we don't need to zoom the image at all
if dataType == 'player':
img = acquire_playerPic(_id, zoom)
else:
img = acquire_teamPic(season_id, _title, _id, zoom)
ax.add_artist(img)
# specify the % a zone that we want to correspond to a maximum sized hexagon [I have this set to any zone with >= 1% of all shots will have a maximum radius, but it's free to be changed based on personal preferences]
max_radius_perc = 1.0
max_rad_multiplier = 100.0/max_radius_perc
# changing to what power we want to scale the area of the hexagons as we increase/decrease the radius. This value can also be changed for personal preferences.
area_multiplier = (3./4.)
lg_efg = float(helper_data.get_lg_efg(season_id, isCareer))
# draw hexagons
# i is the bin#, and shots is the shooting% for that bin
for i, shots in enumerate(ShootingPctLocs):
x,y = shotNumber.get_offsets()[i]
# we check the distance from the hoop the bin is. If it in 3pt territory, we add a multiplier of 1.5 to the shooting% to properly encapsulate eFG%
dist = math.sqrt(x**2 + y**2)
mult = 1.0
if abs(x) >= 220:
mult = 1.5
elif dist/10 >= 23.75:
mult = 1.5
else:
mult = 1.0
# Setting the eFG% for a bin, making sure it's never over 1 (our maximum color value)
color_pct = ((shots*mult)/lg_efg)-0.5
bin_pct = max(min(color_pct, 1.0), 0.0)
hexes = RegularPolygon(
shotNumber.get_offsets()[i], #x/y coords
numVertices=6,
radius=(295/gridNum)*((max_rad_multiplier*((shotNumber.get_array()[i]))/shot_count_all)**(area_multiplier)),
color=cmap(bin_pct),
alpha=0.95,
fill=True)
# setting a maximum radius for our bins at 295 (personal preference)
if hexes.radius > 295/gridNum:
hexes.radius = 295/gridNum
ax.add_patch(hexes)
# creating the frequency legend
# we want to have 4 ticks in this legend so we iterate through 4 items
for i in range(0,4):
base_rad = max_radius_perc/4
# the x,y coords for our patch (the first coordinate is (-205,415), and then we move up and left for each addition coordinate)
patch_x = -205-(10*i)
patch_y = 365-(14*i)
# specifying the size of our hexagon in the frequency legend
patch_rad = (299.9/gridNum)*((base_rad+(base_rad*i))**(area_multiplier))
patch_perc = base_rad+(i*base_rad)
# the x,y coords for our text
text_x = patch_x + patch_rad + 2
text_y = patch_y
patch_axes = (patch_x, patch_y)
# the text will be slightly different for our maximum sized hexagon,
if i < 3:
text_text = ' %s%% of Attempted Shots' % ('%.2f' % patch_perc)
else:
text_text = '$\geq$%s%% of Attempted Shots' %(str(patch_perc))
# draw the hexagon. the color=map(eff_fg_all_float/100) makes the hexagons in the legend the same color as the player's overall eFG%
patch = RegularPolygon(patch_axes, numVertices=6, radius=patch_rad, color=cmap(color_efg), alpha=0.95, fill=True)
ax.add_patch(patch)
# add the text for the hexagon
ax.text(text_x, text_y, text_text, fontsize=16, horizontalalignment='left', verticalalignment='center', family='DejaVu Sans', color='white', fontweight='bold')
# Add a title to our frequency legend (the x/y coords are hardcoded).
# Again, the color=map(eff_fg_all_float/100) makes the hexagons in the legend the same color as the player's overall eFG%
ax.text(-235, 310, 'Zone Frequencies', fontsize=16, horizontalalignment='left', verticalalignment='bottom', family='DejaVu Sans', color=cmap(color_efg), fontweight='bold')
# Add a title to our chart (just the player's name)
chart_title = "%s | %s" % (_title.upper(), season_id)
ax.text(31.25,-40, chart_title, fontsize=32, horizontalalignment='center', verticalalignment='bottom', family='DejaVu Sans', color=cmap(color_efg), fontweight='bold')
# Add user text
ax.text(-250,-31,'CHARTS BY CONNOR REED (@NBAChartBot)',
fontsize=16, horizontalalignment='left', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
# Add data source text
ax.text(31.25,-31,'DATA FROM STATS.NBA.COM',
fontsize=16, horizontalalignment='center', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
# Add date text
_date = date.today()
ax.text(250,-31,'AS OF %s' % (str(_date)),
fontsize=16, horizontalalignment='right', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
key_text = get_key_text(dataType, _id, season_id, isCareer)
# adding breakdown of eFG% by shot zone at the bottom of the chart
ax.text(307,380, key_text, fontsize=20, horizontalalignment='right', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.5)
if dataType == 'player':
teams_text, team_len = get_teams_text(_id, season_id, isCareer)
else:
teams_text = _title
team_len = 0
# adding which season the chart is for, as well as what teams the player is on
if team_len > 12:
ax.text(-250,380, season_id + ' Regular Season:\n' + teams_text,
fontsize=20, horizontalalignment='left', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.4)
else:
ax.text(-250,380, season_id + ' Regular Season:\n' + teams_text,
fontsize=20, horizontalalignment='left', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.6)
# adding a color bar for reference
ax2 = fig.add_axes([0.875, 0.15, 0.04, 0.775])
cb = mpb.colorbar.ColorbarBase(ax2,cmap=cmap, orientation='vertical')
cbytick_obj = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cbytick_obj, color='white', fontweight='bold',fontsize=16)
cb.set_label('EFG+ (100 is League Average)', family='DejaVu Sans', color='white', fontweight='bold', labelpad=-4, fontsize=24)
cb.set_ticks([0.0, 0.25, 0.5, 0.75, 1.0])
cb.set_ticklabels(['$\mathbf{\leq}$50','75', '100','125', '$\mathbf{\geq}$150'])
figtit = path+'%s(%s)_%s.png' % (_name, _id, season_id.replace('PBP ERA (1996/97 onward)','').replace(' ',''))
plt.savefig(figtit, facecolor='#2E3748', edgecolor='black')
plt.clf()
#Producing the text for the bottom of the shot chart
def get_key_text(dataType, _id, season_id, isCareer, isTwitter=False):
text = ''
total_atts = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'r.attempts'))
total_makes = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'b.makes'))
total_games = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'r.games'))
total_attPerGame = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'r.attempts/r.games'))
vol_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'AttemptsPerGame_percentile'))
vol_word = helper_data.get_text_description('AttemptsPerGame', vol_percentile)
vol_text = '$\mathbf{' + vol_word.upper() + '}$ Volume | ' + str(total_makes) + ' for ' + str(total_atts) + ' in ' + str(total_games) + ' Games | ' + str(total_attPerGame) + ' FGA/Game, $\mathbf{P_{' + str(vol_percentile) + '}}$'
vol_twitter_text = 'Volume: ' + vol_word.upper() + ' | P_' + str(vol_percentile) + ' (percentile)'
shotSkillPlus = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 's.ShotSkillPlus'))
shotSkill_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'shotSkill_Percentile'))
shotSkill_word = helper_data.get_text_description('shotSkill', shotSkill_percentile)
shotSkill_text = '$\mathbf{' + shotSkill_word.upper() + '}$ Shot Skill | ' + str(shotSkillPlus) + ' ShotSkill+, $\mathbf{P_{' + str(shotSkill_percentile) + '}}$'
shotSkill_twitter_text = 'Shot Skill: ' + shotSkill_word.upper() + ' | P_' + str(shotSkill_percentile)
efg = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'd.efg*100'))
efgPlus = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'r.efg_plus'))
efg_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'EFG_Percentile'))
efg_word = helper_data.get_text_description('EFG', efg_percentile)
efg_text = '$\mathbf{' + efg_word.upper() + '}$ Efficiency | ' + str(efg) + ' EFG% | ' + str(efgPlus) + ' EFG+, $\mathbf{P_{' + str(efg_percentile) + '}}$'
efg_twitter_text = 'Efficiency: ' + efg_word.upper() + ' | P_' + str(efg_percentile)
PAA = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'r.paa'))
PAAperGame = ("%.1f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'All', 'r.paa_per_game'))
PAA_percentile = ("%.0f" % helper_data.get_metrics(dataType, _id, season_id, isCareer, 'all', 'PAAperGame_percentile'))
PAA_word = helper_data.get_text_description('PAAperGame', PAA_percentile)
PAA_text = '$\mathbf{' + PAA_word.upper() + '}$ Efficiency Value Added | ' + str(PAA) + ' Total PAA | ' + str(PAAperGame) + ' PAA/Game, $\mathbf{P_{' + str(PAA_percentile) + '}}$'
PAA_twitter_text = 'Efficiency Value: ' + PAA_word.upper() + ' | P_' + str(PAA_percentile)
fav_zone, fav_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'positive', 'ROUND(zone_pct_plus-100,0)')
if fav_zoneVal >= 0:
fav_zoneTextAdd = "+"
else:
fav_zoneTextAdd = ""
fav_zoneTEXT = '$\mathbf{Favorite Zone}$ (Relative to League Averages) -- $\mathbf{' + str(fav_zone) + '}$ (' + str(fav_zoneTextAdd) + str(fav_zoneVal) + '% distribution)'
fav_twitter_zoneTEXT = 'Favorite Zone: ' + str(fav_zone)
skill_zone, skill_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'positive', 'ROUND(zone_efg_plus-100,0)')
if skill_zoneVal >= 0:
skill_zoneTextAdd = 'above'
else:
skill_zoneTextAdd = 'below'
skill_zoneTEXT = '$\mathbf{Best Skill}$ -- $\mathbf{' + str(skill_zone) + '}$ (' + str(abs(skill_zoneVal)) + '% ' + str(skill_zoneTextAdd) + ' average)'
skill_twitter_zoneTEXT = 'Best Skill Zone: ' + str(skill_zone)
value_zone, value_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'positive', 'ROUND(paa, 0)')
if value_zoneVal >= 0:
value_zoneTextAdd = "+"
else:
value_zoneTextAdd = ""
value_zoneTEXT = '$\mathbf{Best Value}$ -- $\mathbf{' + str(value_zone) + '}$ (' + str(value_zoneTextAdd) + str(value_zoneVal) + ' PAA)'
value_twitter_zoneTEXT = 'Best Value Zone: ' + str(value_zone)
LEASTskill_zone, LEASTskill_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'negative', 'ROUND(zone_efg_plus-100,0)')
if LEASTskill_zoneVal >= 0:
LEASTskill_zoneTextAdd = 'above'
else:
LEASTskill_zoneTextAdd = 'below'
LEASTskill_zoneTEXT = '$\mathbf{Worst Skill}$ -- $\mathbf{' + str(LEASTskill_zone) + '}$ (' + str(abs(LEASTskill_zoneVal)) + '% ' + str(LEASTskill_zoneTextAdd) + ' average)'
LEASTvalue_zone, LEASTvalue_zoneVal = helper_data.get_extreme_zones(dataType, _id, season_id, isCareer, 'negative', 'ROUND(paa, 0)')
if LEASTvalue_zoneVal >= 0:
LEASTvalue_zoneTextAdd = "+"
else:
LEASTvalue_zoneTextAdd = ""
LEASTvalue_zoneTEXT = '$\mathbf{Least Value}$ -- $\mathbf{' + str(LEASTvalue_zone) + '}$ (' + str(LEASTvalue_zoneTextAdd) + str(LEASTvalue_zoneVal) + ' PAA)'
if isTwitter is False:
text += vol_text
text += '\n'+shotSkill_text
text += '\n'+efg_text
text += '\n'+PAA_text
text += '\n'+fav_zoneTEXT
text += '\n'+skill_zoneTEXT
text += ' | '+value_zoneTEXT
text += '\n'+LEASTskill_zoneTEXT
text += ' | '+LEASTvalue_zoneTEXT
else:
text += ':\n\n'+vol_twitter_text
text += '\n'+shotSkill_twitter_text
text += '\n'+efg_twitter_text
text += '\n'+PAA_twitter_text
text += '\n\n'+fav_twitter_zoneTEXT
text += '\n'+skill_twitter_zoneTEXT
text += '\n'+value_twitter_zoneTEXT
return text
#Getting the shooting percentages for each grid.
#The general idea of this function, as well as a substantial block of the actual code was recycled from Dan Vatterott [http://www.danvatterott.com/]
def find_shootingPcts(shot_df, gridNum):
x = shot_df.LOC_X[shot_df['LOC_Y']<425.1]
y = shot_df.LOC_Y[shot_df['LOC_Y']<425.1]
# Grabbing the x and y coords, for all made shots
x_made = shot_df.LOC_X[(shot_df['SHOT_MADE_FLAG']==1) & (shot_df['LOC_Y']<425.1)]
y_made = shot_df.LOC_Y[(shot_df['SHOT_MADE_FLAG']==1) & (shot_df['LOC_Y']<425.1)]
#compute number of shots made and taken from each hexbin location
hb_shot = plt.hexbin(x, y, gridsize=gridNum, extent=(-250,250,425,-50));
plt.close()
hb_made = plt.hexbin(x_made, y_made, gridsize=gridNum, extent=(-250,250,425,-50));
plt.close()
#compute shooting percentage
ShootingPctLocs = hb_made.get_array() / hb_shot.get_array()
ShootingPctLocs[np.isnan(ShootingPctLocs)] = 0 #makes 0/0s=0
shot_count_all = len(shot_df.index)
# Returning all values
return (ShootingPctLocs, hb_shot), shot_count_all
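# --- Illustrative usage sketch (not part of the original module) ---
# find_shootingPcts expects a DataFrame with LOC_X, LOC_Y and SHOT_MADE_FLAG
# columns (the layout returned by the stats.nba.com shot chart endpoint). The
# frame below is synthetic and only demonstrates the calling convention and
# the shape of the return values.
def _demo_find_shootingPcts(gridNum=30):
    rng = np.random.RandomState(0)
    toy_df = pd.DataFrame({
        'LOC_X': rng.randint(-250, 250, size=500),
        'LOC_Y': rng.randint(-40, 300, size=500),
        'SHOT_MADE_FLAG': rng.randint(0, 2, size=500)})
    (pct_locs, hb_shot), shot_count = find_shootingPcts(toy_df, gridNum)
    return pct_locs, shot_count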
#Getting the player picture that we will later place in the chart
#Most of this code was recycled from Savvas Tjortjoglou [http://savvastjortjoglou.com]
def acquire_playerPic(player_id, zoom, offset=(250,370)):
try:
img_path = os.getcwd()+'/'+str(player_id)+'.png'
player_pic = plt.imread(img_path)
except (ValueError,IOError):
try:
pic = urllib.urlretrieve("https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/"+str(player_id)+".png",str(player_id)+".png")
player_pic = plt.imread(pic[0])
except (ValueError, IOError):
try:
pic = urllib.urlretrieve("http://stats.nba.com/media/players/230x185/"+str(player_id)+".png",str(player_id)+".png")
player_pic = plt.imread(pic[0])
except (ValueError, IOError):
img_path = os.getcwd()+'/chart_icon.png'
player_pic = plt.imread(img_path)
img = osb.OffsetImage(player_pic, zoom)
img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
return img
#Getting the team picture that we will later place in the chart
def acquire_teamPic(season_id, team_title, team_id, zoom, offset=(250,370)):
abb_file = os.getcwd()+"/../csvs/team_abbreviations.csv"
abb_list = {}
with open(abb_file, 'rU') as f:
mycsv = csv.reader(f)
for row in mycsv:
team, abb, imgurl = row
abb_list[team] = [abb, imgurl]
img_url = abb_list.get(team_title)[1]
try:
img_path = os.getcwd()+'/'+str(team_id)+'.png'
team_pic = plt.imread(img_path)
except IOError:
try:
pic = urllib.urlretrieve(img_url,str(team_id)+'.png')
team_pic = plt.imread(pic[0])
except (ValueError, IOError):
img_path = os.getcwd()+'/nba_logo.png'
            team_pic = plt.imread(img_path)
img = osb.OffsetImage(team_pic, zoom)
img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
return img
#Producing the text for the bottom of the shot chart
def get_teams_text(player_id, season_id, isCareer):
if isCareer is True:
season_q = ''
else:
season_q = '\nAND season_id = %s' % (season_id.replace('-',''))
team_q = """SELECT
DISTINCT CONCAT(city, ' ', tname)
FROM shots s
JOIN teams t USING (team_id)
WHERE player_id = %s%s
AND LEFT(season_id, 4) >= t.start_year
AND LEFT(season_id, 4) < t.end_year;
"""
team_qry = team_q % (player_id, season_q)
# raw_input(team_qry)
teams = db.query(team_qry)
team_list = []
for team in teams:
team_list.append(team[0])
team_text = ""
if len(team_list) == 1:
team_text = str(team_list[0])
else:
i = 0
for team in team_list[0:-1]:
if i%3 == 0 and i > 0:
team_text += '\n'
text_add = '%s, ' % str(team)
team_text += text_add
i += 1
if i%3 == 0:
team_text += '\n'
# raw_input(team_list)
team_text += str(team_list[-1])
return team_text, len(team_list)
| mit |
chrisbarber/dask | dask/array/tests/test_slicing.py | 2 | 18946 | import pytest
pytest.importorskip('numpy')
import itertools
from operator import getitem
from dask.compatibility import skip
import dask.array as da
from dask.array.slicing import (slice_array, _slice_1d, take, new_blockdim,
sanitize_index)
from dask.array.utils import assert_eq
import numpy as np
from toolz import merge
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def test_slice_1d():
expected = {0: slice(10, 25, 1), 1: slice(None, None, None), 2: slice(0, 1, 1)}
result = _slice_1d(100, [25] * 4, slice(10, 51, None))
assert expected == result
# x[100:12:-3]
expected = {0: slice(-2, -8, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3)}
result = _slice_1d(100, [20] * 5, slice(100, 12, -3))
assert expected == result
# x[102::-3]
expected = {0: slice(-2, -21, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3)}
result = _slice_1d(100, [20] * 5, slice(102, None, -3))
assert expected == result
# x[::-4]
expected = {0: slice(-1, -21, -4),
1: slice(-1, -21, -4),
2: slice(-1, -21, -4),
3: slice(-1, -21, -4),
4: slice(-1, -21, -4)}
result = _slice_1d(100, [20] * 5, slice(None, None, -4))
assert expected == result
# x[::-7]
expected = {0: slice(-5, -21, -7),
1: slice(-4, -21, -7),
2: slice(-3, -21, -7),
3: slice(-2, -21, -7),
4: slice(-1, -21, -7)}
result = _slice_1d(100, [20] * 5, slice(None, None, -7))
assert expected == result
# x=range(115)
# x[::-7]
expected = {0: slice(-7, -24, -7),
1: slice(-2, -24, -7),
2: slice(-4, -24, -7),
3: slice(-6, -24, -7),
4: slice(-1, -24, -7)}
result = _slice_1d(115, [23] * 5, slice(None, None, -7))
assert expected == result
# x[79::-3]
expected = {0: slice(-1, -21, -3),
1: slice(-3, -21, -3),
2: slice(-2, -21, -3),
3: slice(-1, -21, -3)}
result = _slice_1d(100, [20] * 5, slice(79, None, -3))
assert expected == result
# x[-1:-8:-1]
expected = {4: slice(-1, -8, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(-1, 92, -1))
assert expected == result
# x[20:0:-1]
expected = {0: slice(-1, -20, -1),
1: slice(-20, -21, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(20, 0, -1))
assert expected == result
# x[:0]
expected = {}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(0))
assert result
# x=range(99)
expected = {0: slice(-3, -21, -3),
1: slice(-2, -21, -3),
2: slice(-1, -21, -3),
3: slice(-2, -20, -3),
4: slice(-1, -21, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(99, [20, 20, 20, 19, 20], slice(100, None, -3))
assert expected == result
# x=range(104)
# x[::-3]
expected = {0: slice(-1, -21, -3),
1: slice(-3, -24, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, None, -3))
assert expected == result
# x=range(104)
# x[:27:-3]
expected = {1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, 27, -3))
assert expected == result
# x=range(104)
# x[100:27:-3]
expected = {1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-4, -22, -3)}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(100, 27, -3))
assert expected == result
def test_slice_singleton_value_on_boundary():
assert _slice_1d(15, [5, 5, 5], 10) == {2: 0}
assert _slice_1d(30, (5, 5, 5, 5, 5, 5), 10) == {2: 0}
def test_slice_array_1d():
#x[24::2]
expected = {('y', 0): (getitem, ('x', 0), (slice(24, 25, 2),)),
('y', 1): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 2): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 3): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [[25] * 4], [slice(24, None, 2)])
assert expected == result
#x[26::2]
expected = {('y', 0): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 1): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 2): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [[25] * 4], [slice(26, None, 2)])
assert expected == result
#x[24::2]
expected = {('y', 0): (getitem, ('x', 0), (slice(24, 25, 2),)),
('y', 1): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 2): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 3): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [(25, ) * 4], (slice(24, None, 2), ))
assert expected == result
#x[26::2]
expected = {('y', 0): (getitem, ('x', 1), (slice(1, 25, 2),)),
('y', 1): (getitem, ('x', 2), (slice(0, 25, 2),)),
('y', 2): (getitem, ('x', 3), (slice(1, 25, 2),))}
result, chunks = slice_array('y', 'x', [(25, ) * 4], (slice(26, None, 2), ))
assert expected == result
def test_slice_array_2d():
#2d slices: x[13::2,10::1]
expected = {('y', 0, 0): (getitem, ('x', 0, 0),
(slice(13, 20, 2), slice(10, 20, 1))),
('y', 0, 1): (getitem, ('x', 0, 1),
(slice(13, 20, 2), slice(None, None, None))),
('y', 0, 2): (getitem, ('x', 0, 2),
(slice(13, 20, 2), slice(None, None, None)))}
result, chunks = slice_array('y', 'x', [[20], [20, 20, 5]],
[slice(13, None, 2), slice(10, None, 1)])
assert expected == result
#2d slices with one dimension: x[5,10::1]
expected = {('y', 0): (getitem, ('x', 0, 0),
(5, slice(10, 20, 1))),
('y', 1): (getitem, ('x', 0, 1),
(5, slice(None, None, None))),
('y', 2): (getitem, ('x', 0, 2),
(5, slice(None, None, None)))}
result, chunks = slice_array('y', 'x', ([20], [20, 20, 5]),
[5, slice(10, None, 1)])
assert expected == result
def test_slice_optimizations():
#bar[:]
expected = {('foo', 0): ('bar', 0)}
result, chunks = slice_array('foo', 'bar', [[100]], (slice(None, None, None),))
assert expected == result
#bar[:,:,:]
expected = {('foo', 0): ('bar', 0),
('foo', 1): ('bar', 1),
('foo', 2): ('bar', 2)}
result, chunks = slice_array('foo', 'bar', [(100, 1000, 10000)],
(slice(None, None, None),
slice(None, None, None),
slice(None, None, None)))
assert expected == result
def test_slicing_with_singleton_indices():
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]), (slice(0, 5), 8))
expected = {('y', 0): (getitem, ('x', 0, 1), (slice(None, None, None), 3))}
assert expected == result
def test_slicing_with_newaxis():
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(slice(0, 3), None, slice(None, None, None)))
expected = {
('y', 0, 0, 0): (getitem, ('x', 0, 0),
(slice(0, 3, 1), None, slice(None, None, None))),
('y', 0, 0, 1): (getitem, ('x', 0, 1),
(slice(0, 3, 1), None, slice(None, None, None)))}
assert expected == result
assert chunks == ((3,), (1,), (5, 5))
def test_take():
chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], axis=0)
expected = {('y', 0): (getitem, (np.concatenate,
[(getitem, ('x', 0), ([1, 3, 5],)),
(getitem, ('x', 2), ([7],))], 0),
([2, 0, 3, 1], ))}
assert dsk == expected
assert chunks == ((4,),)
chunks, dsk = take('y', 'x', [(20, 20, 20, 20), (20, 20)], [5, 1, 47, 3], axis=0)
expected = {('y', 0, j): (getitem, (np.concatenate,
[(getitem, ('x', 0, j),
([1, 3, 5], slice(None, None, None))),
(getitem, ('x', 2, j),
([7], slice(None, None, None)))], 0),
([2, 0, 3, 1], slice(None, None, None)))
for j in range(2)}
assert dsk == expected
assert chunks == ((4,), (20, 20))
chunks, dsk = take('y', 'x', [(20, 20, 20, 20), (20, 20)], [5, 1, 37, 3], axis=1)
expected = {('y', i, 0): (getitem, (np.concatenate,
[(getitem, ('x', i, 0),
(slice(None, None, None), [1, 3, 5])),
(getitem, ('x', i, 1),
(slice(None, None, None), [17]))], 1),
(slice(None, None, None), [2, 0, 3, 1]))
for i in range(4)}
assert dsk == expected
assert chunks == ((20, 20, 20, 20), (4,))
def test_take_sorted():
chunks, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
expected = {('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
('y', 1): (getitem, ('x', 2), ([7],))}
assert dsk == expected
assert chunks == ((3, 1),)
chunks, dsk = take('y', 'x', [(20, 20, 20, 20), (20, 20)], [1, 3, 5, 37], axis=1)
expected = merge(dict((('y', i, 0), (getitem, ('x', i, 0),
(slice(None, None, None), [1, 3, 5])))
for i in range(4)),
dict((('y', i, 1), (getitem, ('x', i, 1),
(slice(None, None, None), [17])))
for i in range(4)))
assert dsk == expected
assert chunks == ((20, 20, 20, 20), (3, 1))
def test_slice_lists():
y, chunks = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
([2, 1, 9], slice(None, None, None)))
exp = {('y', 0, i): (getitem, (np.concatenate,
[(getitem, ('x', 0, i),
([1, 2], slice(None, None, None))),
(getitem, ('x', 3, i),
([0], slice(None, None, None)))], 0),
([1, 0, 2], slice(None, None, None)))
for i in range(4)}
assert y == exp
assert chunks == ((3,), (3, 3, 3, 1))
def test_slicing_chunks():
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(1, [2, 0, 3]))
assert chunks == ((3,), )
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(slice(0, 7), [2, 0, 3]))
assert chunks == ((5, 2), (3, ))
result, chunks = slice_array('y', 'x', ([5, 5], [5, 5]),
(slice(0, 7), 1))
assert chunks == ((5, 2), )
def test_slicing_with_numpy_arrays():
a, bd1 = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
([1, 2, 9], slice(None, None, None)))
b, bd2 = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
(np.array([1, 2, 9]), slice(None, None, None)))
assert bd1 == bd2
assert a == b
i = [False, True, True, False, False,
False, False, False, False, True, False]
c, bd3 = slice_array('y', 'x', ((3, 3, 3, 1), (3, 3, 3, 1)),
(i, slice(None, None, None)))
assert bd1 == bd3
assert a == c
def test_slicing_and_chunks():
o = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))
t = o[4:-4, 2:-2]
assert t.chunks == ((8, 8), (6, 6))
def test_slice_stop_0():
# from gh-125
a = da.ones(10, chunks=(10,))[:0].compute()
b = np.ones(10)[:0]
assert_eq(a, b)
def test_slice_list_then_None():
x = da.zeros(shape=(5, 5), chunks=(3, 3))
y = x[[2, 1]][None]
assert_eq(y, np.zeros((1, 2, 5)))
class ReturnItem(object):
def __getitem__(self, key):
return key
@skip
def test_slicing_exhaustively():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
I = ReturnItem()
# independent indexing along different axes
indexers = [0, -2, I[:], I[:5], [0, 1], [0, 1, 2], [4, 2], I[::-1], None, I[:0], []]
for i in indexers:
assert_eq(x[i], a[i]), i
for j in indexers:
assert_eq(x[i][:, j], a[i][:, j]), (i, j)
assert_eq(x[:, i][j], a[:, i][j]), (i, j)
for k in indexers:
assert_eq(x[..., i][:, j][k], a[..., i][:, j][k]), (i, j, k)
# repeated indexing along the first axis
first_indexers = [I[:], I[:5], np.arange(5), [3, 1, 4, 5, 0], np.arange(6) < 6]
second_indexers = [0, -1, 3, I[:], I[:3], I[2:-1], [2, 4], [], I[:0]]
for i in first_indexers:
for j in second_indexers:
assert_eq(x[i][j], a[i][j]), (i, j)
def test_slicing_with_negative_step_flops_keys():
x = da.arange(10, chunks=5)
y = x[:1:-1]
assert (x.name, 1) in y.dask[(y.name, 0)]
assert (x.name, 0) in y.dask[(y.name, 1)]
assert_eq(y, np.arange(10)[:1:-1])
assert y.chunks == ((5, 3),)
assert y.dask[(y.name, 0)] == (getitem, (x.name, 1),
(slice(-1, -6, -1),))
assert y.dask[(y.name, 1)] == (getitem, (x.name, 0),
(slice(-1, -4, -1),))
def test_empty_slice():
x = da.ones((5, 5), chunks=(2, 2), dtype='i4')
y = x[:0]
assert_eq(y, np.ones((5, 5), dtype='i4')[:0])
def test_multiple_list_slicing():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
assert_eq(x[:, [0, 1, 2]][[0, 1]], a[:, [0, 1, 2]][[0, 1]])
def test_uneven_chunks():
assert da.ones(20, chunks=5)[::2].chunks == ((3, 2, 3, 2),)
def test_new_blockdim():
assert new_blockdim(20, [5, 5, 5, 5], slice(0, None, 2)) == [3, 2, 3, 2]
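# --- Illustrative extra check (a sketch, not part of the original suite) ---
# new_blockdim returns the chunk sizes of the sliced axis; whatever the step,
# those sizes should add up to the total length of the sliced range.
def test_new_blockdim_total_length():
    chunks = new_blockdim(20, [5, 5, 5, 5], slice(0, None, 2))
    assert sum(chunks) == len(range(0, 20, 2))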
def test_slicing_consistent_names():
x = np.arange(100).reshape((10, 10))
a = da.from_array(x, chunks=(5, 5))
assert same_keys(a[0], a[0])
assert same_keys(a[:, [1, 2, 3]], a[:, [1, 2, 3]])
assert same_keys(a[:, 5:2:-1], a[:, 5:2:-1])
def test_sanitize_index():
pd = pytest.importorskip('pandas')
with pytest.raises(TypeError):
sanitize_index('Hello!')
assert sanitize_index(pd.Series([1, 2, 3])) == [1, 2, 3]
assert sanitize_index((1, 2, 3)) == [1, 2, 3]
def test_uneven_blockdims():
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30), (100,))
index = (slice(240, 270), slice(None))
dsk_out, bd_out = slice_array('in', 'out', blockdims, index)
sol = {('in', 0, 0): (getitem, ('out', 7, 0), (slice(28, 31, 1), slice(None))),
('in', 1, 0): (getitem, ('out', 8, 0), (slice(0, 27, 1), slice(None)))}
assert dsk_out == sol
assert bd_out == ((3, 27), (100,))
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30),) * 2
index = (slice(240, 270), slice(180, 230))
dsk_out, bd_out = slice_array('in', 'out', blockdims, index)
sol = {('in', 0, 0): (getitem, ('out', 7, 5), (slice(28, 31, 1), slice(29, 30, 1))),
('in', 0, 1): (getitem, ('out', 7, 6), (slice(28, 31, 1), slice(None))),
('in', 0, 2): (getitem, ('out', 7, 7), (slice(28, 31, 1), slice(0, 18, 1))),
('in', 1, 0): (getitem, ('out', 8, 5), (slice(0, 27, 1), slice(29, 30, 1))),
('in', 1, 1): (getitem, ('out', 8, 6), (slice(0, 27, 1), slice(None))),
('in', 1, 2): (getitem, ('out', 8, 7), (slice(0, 27, 1), slice(0, 18, 1)))}
assert dsk_out == sol
assert bd_out == ((3, 27), (1, 31, 18))
def test_oob_check():
x = da.ones(5, chunks=(2,))
with pytest.raises(IndexError):
x[6]
with pytest.raises(IndexError):
x[[6]]
with pytest.raises(IndexError):
x[0, 0]
def test_index_with_dask_array_errors():
x = da.ones((5, 5), chunks=2)
with pytest.raises(NotImplementedError):
x[0, x > 10]
def test_cull():
x = da.ones(1000, chunks=(10,))
for slc in [1, slice(0, 30), slice(0, None, 100)]:
y = x[slc]
assert len(y.dask) < len(x.dask)
@pytest.mark.parametrize('shape', [(2,), (2, 3), (2, 3, 5)])
@pytest.mark.parametrize('slice', [(Ellipsis,),
(None, Ellipsis),
(Ellipsis, None),
(None, Ellipsis, None)])
def test_slicing_with_Nones(shape, slice):
x = np.random.random(shape)
d = da.from_array(x, chunks=shape)
assert_eq(x[slice], d[slice])
indexers = [Ellipsis, slice(2), 0, 1, -2, -1, slice(-2, None), None]
"""
@pytest.mark.parametrize('a', indexers)
@pytest.mark.parametrize('b', indexers)
@pytest.mark.parametrize('c', indexers)
@pytest.mark.parametrize('d', indexers)
def test_slicing_none_int_ellipses(a, b, c, d):
if (a, b, c, d).count(Ellipsis) > 1:
return
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
"""
def test_slicing_none_int_ellipses():
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
for ind in itertools.product(indexers, indexers, indexers, indexers):
if ind.count(Ellipsis) > 1:
continue
assert_eq(x[ind], y[ind])
def test_None_overlap_int():
a, b, c, d = (0, slice(None, 2, None), None, Ellipsis)
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
def test_negative_n_slicing():
assert_eq(da.ones(2, chunks=2)[-2], np.ones(2)[-2])
| bsd-3-clause |
mehdidc/py-earth | examples/plot_derivatives.py | 4 | 1177 | """
============================================
Plotting derivatives of simple sine function
============================================
A simple example plotting a fit of the sine function and
the derivatives computed by Earth.
"""
import numpy
import matplotlib.pyplot as plt
from pyearth import Earth
# Create some fake data
numpy.random.seed(2)
m = 10000
n = 10
X = 20 * numpy.random.uniform(size=(m, n)) - 10
y = 10*numpy.sin(X[:, 6]) + 0.25*numpy.random.normal(size=m)
# Compute the known true derivative with respect to the predictive variable
y_prime = 10*numpy.cos(X[:, 6])
# Fit an Earth model
model = Earth(max_degree=2, minspan_alpha=.5, smooth=True)
model.fit(X, y)
# Print the model
print(model.trace())
print(model.summary())
# Get the predicted values and derivatives
y_hat = model.predict(X)
y_prime_hat = model.predict_deriv(X, 'x6')
# Plot true and predicted function values and derivatives
# for the predictive variable
plt.subplot(211)
plt.plot(X[:, 6], y, 'r.')
plt.plot(X[:, 6], y_hat, 'b.')
plt.ylabel('function')
plt.subplot(212)
plt.plot(X[:, 6], y_prime, 'r.')
plt.plot(X[:, 6], y_prime_hat[:, 0], 'b.')
plt.ylabel('derivative')
plt.show()
| bsd-3-clause |
holla2040/valvestudio | projects/pentode/operatingPointDesign/src/babysteps/imd/imd-300-400.py | 1 | 3440 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
freqset = '300-400'
ampset = 'even20'
ftable = {
'100':(100,0,0),
'300':(300,0,0),
'400':(400,0,0),
'300-400':(300,400,0),
'GDG':(98,147,196),
'Gm' :(196,247,294)
}
gtable = {
'test' : (0.00,1.0,2.0,0,0),
'clean' : (2.00,0,0,0,0),
'even1' : (2.0,500e-1,2.0,0,0),
'even20' : (2.0,1e-1,2.0,0,0),
'even40' : (2.0,1e-2,2.0,0,0),
'even60' : (2.0,1e-3,2.0,0,0),
'even1020' : (1.0,100e-1,2.0,100e-2,4.0),
'even2040' : (2.0,1e-1,2.0,1e-2,4.0),
'even6080' : (2.0,1e-3,2.0,1e-4,4.0),
'odd20' : (2.0,1e-1,3.0,0,0),
'odd40' : (2.0,1e-2,3.0,0,0),
'odd2040' : (2.0,1e-1,3.0,1e-2,5.0),
'odd6080' : (2.0,1e-3,3.0,1e-4,5.0),
'odd1020' : (1.0,100e-1,3.0,100e-2,5.0),
'tubeeven20': (26,27.0,-1.6,0.3,0),
'tubeeven40': (55,70.0,-1.0,0.05,0),
'tubeodd20' : (0,1.0,0.0,1.5,0),
'tubeodd40' : (0,5.0,0.0,0.35,0),
}
f1,f2,f3 = ftable[freqset]
a0,a1,af1,a2,af2 = gtable[ampset]
# print a0,a1,af1,a2,af2
Fs = 44000.0; # sampling rate
Ts = 1.0/Fs; # sampling interval
t = np.arange(0,1,Ts) # time vector
vin1 = np.sin(2*np.pi*f1*t)
vin2 = np.sin(2*np.pi*f2*t)
vin3 = np.sin(2*np.pi*f3*t)
vin = vin1 + vin2 + vin3
if ampset.count('tube'):
vout = a0+a1*np.arctan(af1+a2*vin)
else:
vout = a0*vin + a1*(vin**af1) + a2*(vin**af2) # amplifier non-linear
vout = vout + np.random.normal(0.0,0.1,Fs)/100.0
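# --- Illustrative aside (a sketch, not part of the original script) ---
# With a two-tone input at f1 and f2, a second-order term in the transfer
# curve puts energy at f2-f1, 2*f1, f1+f2 and 2*f2. Enumerating those product
# frequencies here makes them easy to match against the FFT peaks printed at
# the bottom of the script.
if f1 and f2:
    imd2_products = sorted(set([abs(f2 - f1), 2 * f1, f1 + f2, 2 * f2]))
    print "expected 2nd-order products (Hz): %s" % imd2_products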
n = len(vout) # length of the signal
k = np.arange(n)
T = n/Fs
frq = k/T # two sides frequency range
frq = frq[range(n/2)] # one side frequency range
Y = np.fft.fft(vout)/n # fft computing and normalization
Y = Y[range(n/2)]
mag = 20*np.log10(np.abs(Y))
peakindices = mag > -90
peakfrqs = frq[peakindices]
peaks = mag[peakindices]
peaksgtdc = len(peakfrqs[peakfrqs > 10])
fig, ax = plt.subplots(3, 1,figsize=(10, 20))
ax[0].plot(t,vin1,label='%dHz'%f1)
if f2:
ax[0].plot(t,vin2,label='%dHz'%f2)
if f3:
ax[0].plot(t,vin3,label='%dHz'%f3)
ax[0].set_xlabel('Time')
ax[0].set_xlim(0,5.0/f1)
ax[0].set_ylabel('Amplitude')
handles, labels = ax[0].get_legend_handles_labels()
ax[0].legend(handles[::-1], labels[::-1])
ax[1].plot(t,vin, label='Vin')
ax[1].plot(t,vout, label='Vout')
ax[1].set_xlabel('Time')
ax[1].set_xlim(0,5.0/f1)
ax[1].set_ylabel('Amplitude')
handles, labels = ax[1].get_legend_handles_labels()
ax[1].legend(handles[::-1], labels[::-1])
if peaksgtdc == 1:
plabel = "Peak"
else:
plabel = "Peaks"
ax[2].semilogx(frq,mag,'r',label="%s\n%d %s"%(ampset,peaksgtdc,plabel)) # plotting the spectrum
ax[2].set_xlabel('Freq (Hz)')
ax[2].set_xlim(10,20000)
ax[2].grid(True,'both')
ax[2].set_ylabel('Vout dB')
ax[2].set_ylim(-120,20)
handles, labels = ax[2].get_legend_handles_labels()
ax[2].legend(handles[::-1], labels[::-1])
for i in range(len(peakfrqs)):
ax[2].annotate("%.0f,%.1f"%(peakfrqs[i],peaks[i]),
xy=(peakfrqs[i],peaks[i]),
xycoords='data',
xytext=(-5,8),
textcoords='offset points',
            horizontalalignment='left',
rotation=90,
bbox=dict(boxstyle="round", fc="1.0"),
size=10)
print
print peaksgtdc,"peaks above DC"
print "Hz dB"
for i in range(len(peakfrqs)):
print "%-7.0f %-0.2f"%(peakfrqs[i],peaks[i])
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.show()
| mit |
slinderman/pyhawkes | examples/inference/gibbs_ss_demo.py | 1 | 8978 | import numpy as np
import os
import pickle
import gzip
# np.seterr(all='raise')
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_mutual_info_score, \
adjusted_rand_score, roc_auc_score
from pyhawkes.internals.network import StochasticBlockModel
from pyhawkes.models import \
DiscreteTimeNetworkHawkesModelSpikeAndSlab, \
DiscreteTimeStandardHawkesModel
def demo(seed=None):
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
if seed is None:
seed = np.random.randint(2**32)
print("Setting seed to ", seed)
np.random.seed(seed)
###########################################################
# Load some example data.
# See data/synthetic/generate.py to create more.
###########################################################
data_path = os.path.join("data", "synthetic", "synthetic_K20_C4_T10000.pkl.gz")
with gzip.open(data_path, 'r') as f:
S, true_model = pickle.load(f)
T = S.shape[0]
K = true_model.K
B = true_model.B
dt = true_model.dt
dt_max = true_model.dt_max
###########################################################
# Initialize with MAP estimation on a standard Hawkes model
###########################################################
init_with_map = True
if init_with_map:
init_len = T
print("Initializing with BFGS on first ", init_len, " time bins.")
init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max, B=B,
alpha=1.0, beta=1.0)
init_model.add_data(S[:init_len, :])
init_model.initialize_to_background_rate()
init_model.fit_with_bfgs()
else:
init_model = None
###########################################################
# Create a test spike and slab model
###########################################################
# Copy the network hypers.
# Give the test model p, but not c, v, or m
network_hypers = true_model.network_hypers.copy()
network_hypers['c'] = None
network_hypers['v'] = None
network_hypers['m'] = None
test_network = StochasticBlockModel(K=K, **network_hypers)
test_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
basis_hypers=true_model.basis_hypers,
bkgd_hypers=true_model.bkgd_hypers,
impulse_hypers=true_model.impulse_hypers,
weight_hypers=true_model.weight_hypers,
network=test_network)
test_model.add_data(S)
# F_test = test_model.basis.convolve_with_basis(S_test)
# Initialize with the standard model parameters
if init_model is not None:
test_model.initialize_with_standard_model(init_model)
# Initialize plots
ln, im_net, im_clus = initialize_plots(true_model, test_model, S)
###########################################################
# Fit the test model with Gibbs sampling
###########################################################
N_samples = 50
samples = []
lps = []
# plls = []
for itr in range(N_samples):
lps.append(test_model.log_probability())
# plls.append(test_model.heldout_log_likelihood(S_test, F=F_test))
samples.append(test_model.copy_sample())
print("")
print("Gibbs iteration ", itr)
print("LP: ", lps[-1])
test_model.resample_model()
# Update plot
if itr % 1 == 0:
update_plots(itr, test_model, S, ln, im_clus, im_net)
###########################################################
# Analyze the samples
###########################################################
analyze_samples(true_model, init_model, samples, lps)
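# --- Optional post-processing sketch (not part of the original demo) ---
# A simple way to keep the Gibbs samples and log probabilities around for
# later analysis; the output filename is an arbitrary choice.
def save_samples(samples, lps, path="gibbs_ss_samples.pkl.gz"):
    with gzip.open(path, "w") as f:
        pickle.dump((samples, lps), f, protocol=-1)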
def initialize_plots(true_model, test_model, S):
K = true_model.K
C = true_model.network.C
R = true_model.compute_rate(S=S)
T = S.shape[0]
# Plot the true network
# Figure 1
plt.figure(1)
ax = plt.subplot(111)
plt.ion()
true_model.plot_adjacency_matrix(ax=ax, vmax=0.25)
plt.pause(0.001)
# Figure 2
# Plot the true and inferred firing rate
plt.figure(2)
plt.plot(np.arange(T), R[:,0], '-k', lw=2)
plt.ion()
ln = plt.plot(np.arange(T), test_model.compute_rate()[:,0], '-r')[0]
plt.show()
    # Figure 3
# Plot the block affiliations
plt.figure(3)
KC = np.zeros((K,C))
KC[np.arange(K), test_model.network.c] = 1.0
im_clus = plt.imshow(KC,
interpolation="none", cmap="Greys",
aspect=float(C)/K)
# Figure 4
plt.figure(4)
ax = plt.subplot(111)
im_net = test_model.plot_adjacency_matrix(ax=ax, vmax=0.25)
plt.pause(0.001)
plt.show()
plt.pause(0.001)
return ln, im_net, im_clus
def update_plots(itr, test_model, S, ln, im_clus, im_net):
K = test_model.K
C = test_model.network.C
T = S.shape[0]
plt.figure(2)
ln.set_data(np.arange(T), test_model.compute_rate()[:,0])
plt.title("\lambda_{%d}. Iteration %d" % (0, itr))
plt.pause(0.001)
plt.figure(3)
KC = np.zeros((K,C))
KC[np.arange(K), test_model.network.c] = 1.0
im_clus.set_data(KC)
plt.title("KxC: Iteration %d" % itr)
plt.pause(0.001)
plt.figure(4)
plt.title("W: Iteration %d" % itr)
im_net.set_data(test_model.weight_model.W_effective)
plt.pause(0.001)
def analyze_samples(true_model, init_model, samples, lps):
N_samples = len(samples)
# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
g_samples = np.array([s.impulse_model.g for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
c_samples = np.array([s.network.c for s in samples])
p_samples = np.array([s.network.p for s in samples])
v_samples = np.array([s.network.v for s in samples])
lps = np.array(lps)
offset = N_samples // 2
A_mean = A_samples[offset:, ...].mean(axis=0)
W_mean = W_samples[offset:, ...].mean(axis=0)
g_mean = g_samples[offset:, ...].mean(axis=0)
lambda0_mean = lambda0_samples[offset:, ...].mean(axis=0)
p_mean = p_samples[offset:, ...].mean(axis=0)
v_mean = v_samples[offset:, ...].mean(axis=0)
print("A true: ", true_model.weight_model.A)
print("W true: ", true_model.weight_model.W)
print("g true: ", true_model.impulse_model.g)
print("lambda0 true: ", true_model.bias_model.lambda0)
print("")
print("A mean: ", A_mean)
print("W mean: ", W_mean)
print("g mean: ", g_mean)
print("lambda0 mean: ", lambda0_mean)
print("v mean: ", v_mean)
print("p mean: ", p_mean)
plt.figure()
plt.plot(np.arange(N_samples), lps, 'k')
plt.xlabel("Iteration")
plt.ylabel("Log probability")
plt.show()
# # Predictive log likelihood
# pll_init = init_model.heldout_log_likelihood(S_test)
# plt.figure()
# plt.plot(np.arange(N_samples), pll_init * np.ones(N_samples), 'k')
# plt.plot(np.arange(N_samples), plls, 'r')
# plt.xlabel("Iteration")
# plt.ylabel("Predictive log probability")
# plt.show()
# Compute the link prediction accuracy curves
auc_init = roc_auc_score(true_model.weight_model.A.ravel(),
init_model.W.ravel())
auc_A_mean = roc_auc_score(true_model.weight_model.A.ravel(),
A_mean.ravel())
auc_W_mean = roc_auc_score(true_model.weight_model.A.ravel(),
W_mean.ravel())
aucs = []
for A in A_samples:
aucs.append(roc_auc_score(true_model.weight_model.A.ravel(), A.ravel()))
plt.figure()
plt.plot(aucs, '-r')
plt.plot(auc_A_mean * np.ones_like(aucs), '--r')
plt.plot(auc_W_mean * np.ones_like(aucs), '--b')
plt.plot(auc_init * np.ones_like(aucs), '--k')
plt.xlabel("Iteration")
plt.ylabel("Link prediction AUC")
plt.show()
# Compute the adjusted mutual info score of the clusterings
amis = []
arss = []
for c in c_samples:
amis.append(adjusted_mutual_info_score(true_model.network.c, c))
arss.append(adjusted_rand_score(true_model.network.c, c))
plt.figure()
plt.plot(np.arange(N_samples), amis, '-r')
plt.plot(np.arange(N_samples), arss, '-b')
plt.xlabel("Iteration")
plt.ylabel("Clustering score")
plt.ioff()
plt.show()
demo(11223344)
| mit |
ebigelow/LOTlib | LOTlib/Examples/NumberGame/Model/Data.py | 3 | 1600 | from LOTlib.DataAndObjects import FunctionData
def import_josh_data(path=None):
"""Script for loading Joshs' number game data.
Data is originally in probability (i.e. float) format, so (# yes, # no) pairs are estimated by
assuming 20 human participants.
"""
import os
from scipy.io import loadmat
if path is None:
path = os.getcwd()
mat = loadmat(path+'/number_game_data.mat')
mat_data = mat['data']
number_game_data = []
for d in mat_data:
input_data = d[0][0].tolist()
output_data = {}
for i in range(len(d[1][0])):
key = d[1][0][i]
associated_prob = d[2][0][i]
associated_yes = int(associated_prob * 20)
output_data[key] = (associated_yes, 20-associated_yes) # est. (# yes, # no) responses
function_datum = FunctionData(input=input_data, output=output_data)
number_game_data.append(function_datum)
return number_game_data
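def summarize_number_game_data(data):
    """A small helper sketch (not part of the original module): returns
    (example_set, number_of_rated_targets) pairs for the FunctionData objects
    produced by the importers in this file."""
    return [(datum.input, len(datum.output)) for datum in data]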
def import_pd_data(fname):
import pandas as pd
from collections import defaultdict
df = pd.read_pickle(fname)
grouped = df.groupby(['concept', 'target'], as_index=False)
data = defaultdict(lambda: FunctionData(input=[], output={}))
for (c, t), group in grouped:
y = sum(group['rating'])
n = len(group['rating']) - y
try:
concept = list(eval(c))
except:
concept = [eval(c)]
target = eval(t)
data[c].input = concept
data[c].output[target] = (y, n)
return data.values()
# josh_data = import_josh_data() | gpl-3.0 |
dhuang/incubator-airflow | airflow/contrib/operators/hive_to_dynamodb.py | 1 | 3826 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
from airflow.hooks.hive_hooks import HiveServer2Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBTransferOperator(BaseOperator):
"""
Moves data from Hive to DynamoDB, note that for now the data is loaded
into memory before being pushed to DynamoDB, so this operator should
be used for smallish amount of data.
:param sql: SQL query to execute against the hive database
:type sql: str
:param table_name: target DynamoDB table
:type table_name: str
:param table_keys: partition key and sort key
:type table_keys: list
:param pre_process: implement pre-processing of source data
:type pre_process: function
:param pre_process_args: list of pre_process function arguments
:type pre_process_args: list
:param pre_process_kwargs: dict of pre_process function arguments
:type pre_process_kwargs: dict
:param region_name: aws region name (example: us-east-1)
:type region_name: str
:param schema: hive database schema
:type schema: str
:param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param aws_conn_id: aws connection
:type aws_conn_id: str
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
table_name,
table_keys,
pre_process=None,
pre_process_args=None,
pre_process_kwargs=None,
region_name=None,
schema='default',
hiveserver2_conn_id='hiveserver2_default',
aws_conn_id='aws_default',
*args, **kwargs):
super(HiveToDynamoDBTransferOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info('Extracting data from Hive')
self.log.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = AwsDynamoDBHook(aws_conn_id=self.aws_conn_id,
table_name=self.table_name,
table_keys=self.table_keys,
region_name=self.region_name)
self.log.info('Inserting rows into dynamodb')
if self.pre_process is None:
dynamodb.write_batch_data(
json.loads(data.to_json(orient='records')))
else:
dynamodb.write_batch_data(
self.pre_process(data=data,
args=self.pre_process_args,
kwargs=self.pre_process_kwargs))
self.log.info('Done.')
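# --- Illustrative usage sketch (not part of the original operator) ---
# A minimal task definition, assuming an existing `dag` object and a DynamoDB
# table named 'example_table' with hash key 'id'; every name below is made up.
#
#   backfill_task = HiveToDynamoDBTransferOperator(
#       task_id='hive_to_dynamodb_example',
#       sql='SELECT id, feature_value FROM example_schema.example_source',
#       table_name='example_table',
#       table_keys=['id'],
#       dag=dag)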
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/scipy/signal/_arraytools.py | 28 | 7553 | """
Functions for acting on a axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'.
Parameters
----------
a : numpy.ndarray
The array to be sliced.
start, stop, step : int or None
The slice parameters.
axis : int, optional
The axis of `a` to be sliced.
Examples
--------
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> axis_slice(a, start=0, stop=1, axis=1)
array([[1],
[4],
[7]])
>>> axis_slice(a, start=1, axis=0)
array([[4, 5, 6],
[7, 8, 9]])
Notes
-----
The keyword arguments start, stop and step are used by calling
slice(start, stop, step). This implies axis_slice() does not
    handle its arguments exactly the same as indexing. To select
a single index k, for example, use
axis_slice(a, start=k, stop=k+1)
In this case, the length of the axis 'axis' in the result will
be 1; the trivial dimension is not removed. (Use numpy.squeeze()
to remove trivial axes.)
"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
b = a[a_slice]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-d slices of `a` along axis `axis`.
Returns axis_slice(a, step=-1, axis=axis).
"""
return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
"""
Odd extension at the boundaries of an array
Generate a new ndarray by making an odd extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import odd_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> odd_ext(a, 2)
array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[-4, -1, 0, 1, 4, 9, 16, 23, 28]])
Odd extension is a "180 degree rotation" at the endpoints of the original
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = odd_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""
Even extension at the boundaries of an array
Generate a new ndarray by making an even extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import even_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> even_ext(a, 2)
array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3],
[ 4, 1, 0, 1, 4, 9, 16, 9, 4]])
Even extension is a "mirror image" at the boundaries of the original array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = even_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""
Constant extension at the boundaries of an array
Generate a new ndarray that is a constant extension of `x` along an axis.
The extension repeats the values at the first and last element of
the axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import const_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> const_ext(a, 2)
array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5],
[ 0, 0, 0, 1, 4, 9, 16, 16, 16]])
Constant extension continues with the same values as the endpoints of the
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = const_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def zero_ext(x, n, axis=-1):
"""
Zero padding at the boundaries of an array
Generate a new ndarray that is a zero padded extension of `x` along
an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the
axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import zero_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> zero_ext(a, 2)
array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0],
[ 0, 0, 0, 1, 4, 9, 16, 0, 0]])
"""
if n < 1:
return x
zeros_shape = list(x.shape)
zeros_shape[axis] = n
zeros = np.zeros(zeros_shape, dtype=x.dtype)
ext = np.concatenate((zeros, x, zeros), axis=axis)
return ext
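# Editorial addition, not part of the upstream SciPy module: a minimal sketch
# that prints the four extension modes side by side when this file is run
# directly. It relies only on the helpers above and the module's `np` import.
if __name__ == "__main__":
    _a = np.array([1, 2, 3, 4, 5])
    print("odd  :", odd_ext(_a, 2))    # 180-degree rotation about the endpoints
    print("even :", even_ext(_a, 2))   # mirror image at the boundaries
    print("const:", const_ext(_a, 2))  # repeat the edge values
    print("zero :", zero_ext(_a, 2))   # pad with zeros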
| mit |
alexeyum/scikit-learn | examples/linear_model/plot_ols.py | 104 | 1936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
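# For reference: ordinary least squares chooses the coefficients w and intercept c
# that minimize the residual sum of squares sum_i (y_i - x_i . w - c)^2 on the
# training samples; the fitted values end up in regr.coef_ and regr.intercept_.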
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
scienceopen/python-matlab-examples | PlotExamples/xarray_matplotlib.py | 1 | 1569 | #!/usr/bin/env python
"""
matplotlib with datetime64 testing
"""
import xarray
from datetime import datetime
from matplotlib.pyplot import figure, show
import matplotlib.dates as mdates
import numpy as np
def test_plot2d_datetime():
t = np.arange('2010-05-04T12:05:00', '2010-05-04T12:05:01', dtype='datetime64[ms]')
y = np.random.randn(t.size)
# t = t.astype(datetime) # Matplotlib < 2.2
ax = figure().gca()
ax.plot(t, y)
def test_plot2d_xarray():
t = np.arange('2010-05-04T12:05:00', '2010-05-04T12:05:01', dtype='datetime64[ms]')
y = np.random.randn(t.size)
dat = xarray.DataArray(y, coords={'time': t}, dims=['time'])
dset = xarray.Dataset({'random1Dstuff': dat})
fg = figure()
ax = fg.subplots(3, 1, sharex=True)
dat.plot(ax=ax[0])
ax[1].plot(dat.time, dat)
dset['random1Dstuff'].plot(ax=ax[2])
def test_imshow_datetime():
"""
keogram using matplotlib imshow
"""
Ny = 500 # arbitrary
t = np.arange('2010-05-04T12:05', '2010-05-04T12:06', dtype='datetime64[s]').astype(datetime)
im = np.random.random((Ny, t.size))
y = range(t.size) # arbitrary
mt = mdates.date2num((t[0], t[-1])) # at least through Matplotlib 2.2
fig = figure()
ax = fig.gca()
ax.imshow(im, extent=[mt[0], mt[1], y[0], y[-1]], aspect='auto')
# %% datetime formatting
ax.xaxis_date() # like "num2date"
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
fig.autofmt_xdate()
if __name__ == '__main__':
np.testing.run_module_suite()
show()
| mit |
yonglehou/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
anurag313/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
lokeshpancharia/BuildingMachineLearningSystemsWithPython | ch04/blei_lda.py | 21 | 2601 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
from wordcloud import create_cloud
try:
from gensim import corpora, models, matutils
except:
print("import gensim failed.")
print()
print("Please install it")
raise
import matplotlib.pyplot as plt
import numpy as np
from os import path
NUM_TOPICS = 100
# Check that data exists
if not path.exists('./data/ap/ap.dat'):
print('Error: Expected data to be present at data/ap/')
print('Please cd into ./data & run ./download_ap.sh')
# Load the data
corpus = corpora.BleiCorpus('./data/ap/ap.dat', './data/ap/vocab.txt')
# Build the topic model
model = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=None)
# Iterate over all the topics in the model
for ti in range(model.num_topics):
words = model.show_topic(ti, 64)
tf = sum(f for f, w in words)
with open('topics.txt', 'w') as output:
output.write('\n'.join('{}:{}'.format(w, int(1000. * f / tf)) for f, w in words))
output.write("\n\n\n")
# We first identify the most discussed topic, i.e., the one with the
# highest total weight
topics = matutils.corpus2dense(model[corpus], num_terms=model.num_topics)
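# corpus2dense returns a (num_topics x num_documents) matrix of topic weights,
# so summing over axis 1 gives each topic's total weight across the corpus.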
weight = topics.sum(1)
max_topic = weight.argmax()
# Get the top 64 words for this topic
# Without the argument, show_topic would return only 10 words
words = model.show_topic(max_topic, 64)
# This function will actually check for the presence of pytagcloud and is otherwise a no-op
create_cloud('cloud_blei_lda.png', words)
num_topics_used = [len(model[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist(num_topics_used, np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
fig.tight_layout()
fig.savefig('Figure_04_01.png')
# Now, repeat the same exercise using alpha=1.0
# You can edit the constant below to play around with this parameter
ALPHA = 1.0
model1 = models.ldamodel.LdaModel(
corpus, num_topics=NUM_TOPICS, id2word=corpus.id2word, alpha=ALPHA)
num_topics_used1 = [len(model1[doc]) for doc in corpus]
fig,ax = plt.subplots()
ax.hist([num_topics_used, num_topics_used1], np.arange(42))
ax.set_ylabel('Nr of documents')
ax.set_xlabel('Nr of topics')
# The coordinates below were fit by trial and error to look good
ax.text(9, 223, r'default alpha')
ax.text(26, 156, 'alpha=1.0')
fig.tight_layout()
fig.savefig('Figure_04_02.png')
| mit |
runninghack/CensorshipDetection | bin/drawingPruning.py | 1 | 9006 | #!/usr/bin/env python
"""
Draw a graph with matplotlib.
You must have matplotlib for this to work.
"""
__author__ = """Aric Hagberg ([email protected])"""
try:
import matplotlib.pyplot as plt
except:
raise
import time
import networkx as nx
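# Each test* function below parses a pruning-experiment result file (abnormal
# nodes, true abnormal nodes, nodes returned by the algorithm, component edges),
# prints precision and recall, and draws the nodes on a 50x50 (test1, test2500)
# or 60x60 (test3600) lattice with networkx, skipping edges that would wrap
# around the grid boundary.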
def test1():
abnormalNodes = []
trueAbnormalNodes = []
resultNodes = []
fp = open("pruning_1.txt")
edges = []
for i, line in enumerate(fp):
if i == 0:
count = 0
for item in line.rstrip().split(' '):
if float(item.rstrip()) != 0.0:
abnormalNodes.append(count)
count = count + 1
if i == 1:
for item in line.rstrip().split(' '):
trueAbnormalNodes.append(int(item))
if i == 2:
for item in line.rstrip().split(' '):
resultNodes.append(int(item.rstrip()))
if i >= 3:
l0 = int(line.rstrip().split(' ')[0])
l1 = int(line.rstrip().split(' ')[1])
if l0 - l1 > 50 or l0 - l1 < -50:
x = -1
elif l0%50 == 0 or l1%50 == 0:
if l0%50 == 0 and (l1 - l0 == 49 or l1 - l0 == -49 ):
x = -1
elif l1%50 == 0 and (l1 - l0 == 49 or l1 - l0 == -49 ):
x = -1
else:
edges.append((l0,l1))
else:
edges.append((l0,l1))
fp.close()
normalNodes = [item for item in range(2500) if item not in abnormalNodes]
count = 0
for item in resultNodes:
if item in trueAbnormalNodes:
count = count + 1
print 'Precision is ',count*1.0 / len(resultNodes)*1.0, ' and recall is ',count*1.0 / len(trueAbnormalNodes)*1.0
pos = dict()
count = 0
for i in range(50):
for j in range(50):
t = (i,j)
pos[count] = t
count = count + 1
G = nx.Graph()
for item in range(2500):
G.add_node(item)
nx.draw_networkx_nodes(G,pos,node_size=50,nodelist=normalNodes,node_color='w')
nx.draw_networkx_nodes(G,pos,node_size=50,nodelist=abnormalNodes,node_color='r')
nx.draw_networkx_nodes(G,pos,node_size=10,nodelist=resultNodes,node_color='g')
nx.draw_networkx_edges(G,pos,edgelist=edges, alpha=0.5,width=5,edge_color = 'm')
plt.axis('off')
plt.show() # display
plt.close()
def test2500(fileName):
#all abnormal nodes ; true abnormal nodes ; result nodes ; edges
AllAbnormalNodes = []
trueAbnormalNodes = []
components = dict()
resultNodes = []
fp = open(fileName)
edges = dict()
for i, line in enumerate(fp):
if i == 0:
for item in line.rstrip().split(' '):
AllAbnormalNodes.append(int(item.rstrip()))
if i == 1:
for item in line.rstrip().split(' '):
trueAbnormalNodes.append(int(item.rstrip()))
if i == 2:
for item in line.rstrip().split(' '):
resultNodes.append(int(item.rstrip()))
if i == 3:
count = 0
for items in line.rstrip().split('#'):
edges[count] = []
if items == '' or items == ' ':
continue
for item in items.rstrip().split(';'):
if item == '':
continue
print item
l0 = int(item.rstrip().split(' ')[0])
l1 = int(item.rstrip().split(' ')[1])
if l0 - l1 > 50 or l0 - l1 < -50:
x = -1
elif l0%50 == 0 or l1%50 == 0:
if l0%50 == 0 and (l1 - l0 == 49 or l1 - l0 == -49 ):
x = -1
elif l1%50 == 0 and (l1 - l0 == 49 or l1 - l0 == -49 ):
x = -1
else:
edges[count].append((l0,l1))
else:
edges[count].append((l0,l1))
count = count + 1
fp.close()
for item in edges:
print item, edges[item]
normalNodes = [item for item in range(2500) if item not in AllAbnormalNodes]
count = 0
for item in resultNodes:
if item in trueAbnormalNodes:
count = count + 1
print 'Precision is ',count*1.0 / len(resultNodes)*1.0, ' and recall is ',count*1.0 / len(trueAbnormalNodes)*1.0
pos = dict()
count = 0
for i in range(50):
for j in range(50):
t = (i,j)
pos[count] = t
count = count + 1
G = nx.Graph()
for item in range(2500):
G.add_node(item)
nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=resultNodes,node_color='r',node_shape="s")
nx.draw_networkx_nodes(G,pos,node_size=30,nodelist=normalNodes,node_color='w')
#nx.draw_networkx_nodes(G,pos,node_size=40,nodelist=trueAbnormalNodes,node_color='k')
nx.draw_networkx_nodes(G,pos,node_size=30,nodelist=AllAbnormalNodes,node_color='k')
#nx.draw_networkx_edges(G,pos,edgelist=edges[0], alpha=0.5,width=5,edge_color = 'm')
#nx.draw_networkx_edges(G,pos,edgelist=edges[1], alpha=0.5,width=5,edge_color = 'y')
#nx.draw_networkx_edges(G,pos,edgelist=edges[2], alpha=0.5,width=5,edge_color = 'b')
#nx.draw_networkx_edges(G,pos,edgelist=edges[3], alpha=0.5,width=5,edge_color = 'c')
plt.axis('off')
plt.show() # display
plt.close()
def test3600(fileName):
#all abnormal nodes ; true abnormal nodes ; result nodes ; edges
AllAbnormalNodes = []
trueAbnormalNodes = []
components = dict()
resultNodes = []
fp = open(fileName)
edges = dict()
for i, line in enumerate(fp):
if i == 0:
for item in line.rstrip().split(' '):
AllAbnormalNodes.append(int(item.rstrip()))
if i == 1:
for item in line.rstrip().split(' '):
trueAbnormalNodes.append(int(item.rstrip()))
if i == 2:
for item in line.rstrip().split(' '):
resultNodes.append(int(item.rstrip()))
if i == 3:
count = 0
for items in line.rstrip().split('#'):
edges[count] = []
if items == '' or items == ' ':
continue
for item in items.rstrip().split(';'):
if item == '':
continue
print item
l0 = int(item.rstrip().split(' ')[0])
l1 = int(item.rstrip().split(' ')[1])
if l0 - l1 > 60 or l0 - l1 < -60:
x = -1
elif l0%60 == 0 or l1%60 == 0:
if l0%60 == 0 and (l1 - l0 == 59 or l1 - l0 == -59 ):
x = -1
elif l1%60 == 0 and (l1 - l0 == 59 or l1 - l0 == -59 ):
x = -1
else:
edges[count].append((l0,l1))
else:
edges[count].append((l0,l1))
count = count + 1
fp.close()
for item in edges:
print item, edges[item]
normalNodes = [item for item in range(3600) if item not in AllAbnormalNodes]
count = 0
for item in resultNodes:
if item in trueAbnormalNodes:
count = count + 1
print 'Precision is ',count*1.0 / len(resultNodes)*1.0, ' and recall is ',count*1.0 / len(trueAbnormalNodes)*1.0
pos = dict()
count = 0
for i in range(60):
for j in range(60):
t = (i,j)
pos[count] = t
count = count + 1
G = nx.Graph()
for item in range(3600):
G.add_node(item)
nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=resultNodes,node_color='r',node_shape="s")
nx.draw_networkx_nodes(G,pos,node_size=30,nodelist=normalNodes,node_color='w')
#nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=AllAbnormalNodes,node_color='r')
nx.draw_networkx_nodes(G,pos,node_size=30,nodelist=AllAbnormalNodes,node_color='k')
#nx.draw_networkx_edges(G,pos,edgelist=edges[0], alpha=0.5,width=5,edge_color = 'y')
#nx.draw_networkx_edges(G,pos,edgelist=edges[1], alpha=0.5,width=5,edge_color = 'm')
#nx.draw_networkx_edges(G,pos,edgelist=edges[2], alpha=0.5,width=5,edge_color = 'b')
#nx.draw_networkx_edges(G,pos,edgelist=edges[3], alpha=0.5,width=5,edge_color = 'c')
plt.axis('off')
plt.show() # display
plt.close()
if __name__ == '__main__':
#test2500("grid_2500_precen_0.05_numCC_4_0.02_trueS.txt")
test3600("grid_3600_precen_0.05_numCC_4_0.02_trueS.txt")
| gpl-2.0 |
subutai/nupic.research | projects/continuous_learning/correlation_experiment.py | 3 | 12568 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import time
import matplotlib.pyplot as plt
import numpy as np
from cont_speech_experiment import ContinuousSpeechExperiment
from nupic.research.frameworks.continuous_learning.correlation_metrics import (
plot_metrics,
register_act,
)
from nupic.research.support import parse_config
def train_sequential(experiment, print_acc=False):
""" Trains a ContinuousSpeechExperiment sequentially,
i.e. by pairs of labels
:param experiment: ContinuousSpeechExperiment
"""
np.random.seed(np.random.randint(0, 100))
train_labels = np.reshape(np.random.permutation(np.arange(1, 11)), (5, 2))
epochs = 1 # 1 run per class pair
entropies, duty_cycles = [], []
for label in train_labels:
print("training on class {}".format(label))
freeze_indices = np.hstack( # class indices to freeze in output layer
[
0,
np.delete(
train_labels, np.where(train_labels == label)[0], axis=0
).flatten(),
]
)
for epoch in range(epochs):
experiment.train(epoch, label, indices=freeze_indices)
mt = experiment.test()
if print_acc:
print("Mean accuracy: {}".format(mt["mean_accuracy"]))
entropies.append(get_entropies(experiment))
duty_cycles.append(get_duty_cycles(experiment))
return entropies, duty_cycles
def get_duty_cycles(experiment):
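    # Collect the 'duty_cycle' buffer from every module whose state_dict exposes one.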
duty_cycles = []
for module in experiment.model:
dc = module.state_dict()
if "duty_cycle" in dc:
duty_cycles.append(dc["duty_cycle"].detach().cpu().numpy())
return duty_cycles
def get_entropies(experiment):
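    # Collect the entropy of every module exposing an entropy() method (e.g. k-winner layers).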
entropies = []
for module in experiment.model.modules():
if module == experiment.model:
continue
if hasattr(module, "entropy"):
entropy = module.entropy()
entropies.append(entropy.detach().cpu().numpy())
return entropies
class SparseCorrExperiment(object):
def __init__(self, config_file):
self.dense_network = "denseCNN2"
self.sparse_network = "sparseCNN2"
self.entropies = []
self.duty_cycles = []
self.config_file = "experiments.cfg"
def config_init(self, exp):
with open(self.config_file) as cf:
config = parse_config(cf)
exp_config = config[exp]
exp_config["name"] = exp
return exp_config
def reset_ents_dcs(self):
self.entropies = []
self.duty_cycles = []
def model_comparison(self, freeze_linear=False, sequential=False, shuffled=False):
""" Compare metrics on dense and sparse CNN"""
self.reset_ents_dcs()
odcorrs, dcorrs = [], []
oddotproducts, ddotproducts = [], []
output = [odcorrs, dcorrs, oddotproducts, ddotproducts]
if shuffled:
shodcorrs, shdcorrs = [], []
shoddotproducts, shddotproducts = [], []
sh_output = [shodcorrs, shdcorrs, shoddotproducts, shddotproducts]
for exp in [self.dense_network, self.sparse_network]:
config = self.config_init(exp)
if freeze_linear:
config["freeze_params"] = "output"
else:
config["freeze_params"] = []
if shuffled:
outputs, sh_outputs = self.run_experiment(
config, sequential=sequential, shuffled=True
)
[output[k].append(outputs[k]) for k in range(len(outputs))]
[sh_output[k].append(sh_outputs[k]) for k in range(len(sh_outputs))]
else:
outputs = self.run_experiment(
config, sequential=sequential, shuffled=False
)
[output[k].append(outputs[k]) for k in range(len(outputs))]
plot_metrics(output)
if shuffled:
plot_metrics(sh_output)
return output, sh_output
else:
return output
def act_fn_comparison(self, freeze_linear=False, sequential=False, shuffled=False):
""" Compare k-winner and ReLU activations on sparse and dense weights. """
self.reset_ents_dcs()
cnn_weight_sparsities = [(1.0, 1.0), (0.5, 0.2)]
linear_weight_sparsities = [(1.0,), (0.1,)]
cnn_percent_on = [(0.095, 0.125), (1.0, 1.0)]
linear_percent_on = [(0.1,), (1.0,)]
exp = self.sparse_network
odcorrs, dcorrs = [], []
oddotproducts, ddotproducts = [], []
output = [odcorrs, dcorrs, oddotproducts, ddotproducts]
if shuffled:
shodcorrs, shdcorrs = [], []
shoddotproducts, shddotproducts = [], []
sh_output = [shodcorrs, shdcorrs, shoddotproducts, shddotproducts]
for i in range(2):
for j in range(2):
config = self.config_init(exp)
config["cnn_weight_sparsity"] = cnn_weight_sparsities[i]
config["weight_sparsity"] = linear_weight_sparsities[i]
config["cnn_percent_on"] = cnn_percent_on[j]
config["linear_percent_on"] = linear_percent_on[j]
if freeze_linear:
config["freeze_params"] = "output"
else:
config["freeze_params"] = []
if shuffled:
outputs, sh_outputs = self.run_experiment(config, shuffled=shuffled)
[output[k].append(outputs[k]) for k in range(len(outputs))]
[sh_output[k].append(sh_outputs[k]) for k in range(len(sh_outputs))]
else:
outputs = self.run_experiment(config, shuffled=shuffled)
[output[k].append(outputs[k]) for k in range(len(outputs))]
leg = ["dense + k-winner", "dense + ReLU", "sparse + k-winner", "sparse + ReLU"]
plot_metrics(output, legend_=leg)
if shuffled:
plot_metrics(sh_output, legend_=leg)
return output, sh_output
else:
return output
def layer_size_comparison(
self,
layer_sizes,
compare_models=False,
freeze_linear=False,
sequential=False,
shuffled=False,
):
"""Get metrics for specified layer sizes
:param layer_sizes: list of desired CNN layer sizes
:param compare_models (Boolean): will also run a dense CNN
"""
self.reset_ents_dcs()
# get a factor to multiply the weight sparsity and percent on with
sparse_factor = [layer_sizes[0] / k for k in layer_sizes]
odcorrs, dcorrs = [], []
oddotproducts, ddotproducts = [], []
output = [odcorrs, dcorrs, oddotproducts, ddotproducts]
if shuffled:
shodcorrs, shdcorrs = [], []
shoddotproducts, shddotproducts = [], []
sh_output = [shodcorrs, shdcorrs, shoddotproducts, shddotproducts]
if compare_models:
experiments = [self.dense_network, self.sparse_network]
else:
experiments = [self.sparse_network]
for exp in experiments:
for ind in range(len(layer_sizes)):
config = self.config_init(exp)
# get the default sparsity in the config file
# to multiply with "sparse_factor"
curr_sparsity = config["cnn_weight_sparsity"]
curr_percent_on = config["cnn_percent_on"]
if freeze_linear:
config["freeze_params"] = "output"
else:
config["freeze_params"] = []
config["cnn_out_channels"] = (layer_sizes[ind], layer_sizes[ind])
config["cnn_weight_sparsity"] = (
curr_sparsity[0] * sparse_factor[ind],
curr_sparsity[1] * sparse_factor[ind],
)
config["cnn_percent_on"] = (
curr_percent_on[0] * sparse_factor[ind],
curr_percent_on[1] * sparse_factor[ind],
)
if shuffled:
outputs, sh_outputs = self.run_experiment(
config, layer_sizes[ind], shuffled=shuffled
)
[output[k].append(outputs[k]) for k in range(len(outputs))]
[sh_output[k].append(sh_outputs[k]) for k in range(len(sh_outputs))]
else:
outputs = self.run_experiment(
config, layer_sizes[ind], shuffled=shuffled
)
[output[k].append(outputs[k]) for k in range(len(outputs))]
leg = list(zip(np.repeat(experiments, len(layer_sizes)), 3 * layer_sizes))
plot_metrics(output, legend_=leg)
if shuffled:
plot_metrics(sh_output, legend_=leg)
return output, sh_output
else:
return output
def run_experiment(
self,
config,
layer_size=None,
sequential=False,
shuffled=False,
boosting=True,
duty_cycle_period=1000,
):
if not boosting:
config["boost_strength"] = 0.0
config["boost_strength_factor"] = 0.0
config["duty_cycle_period"] = duty_cycle_period
experiment = ContinuousSpeechExperiment(config=config)
start_time = time.time()
if sequential:
entropies, duty_cycles = train_sequential(experiment)
self.entropies.append(entropies)
self.duty_cycles.append(duty_cycles)
else:
experiment.train_entire_dataset(0)
end_time = np.round(time.time() - start_time, 3)
if layer_size is not None:
print("{} layer size network trained in {} s".format(layer_size, end_time))
else:
print("Network trained in {} s".format(end_time))
if shuffled:
corrs, shuffled_corrs = register_act(experiment, shuffle=True)
return corrs, shuffled_corrs
else:
corrs = register_act(experiment)
return corrs
def plot_duty_cycles(experiment):
for dc in experiment.duty_cycles:
if len(dc[0]) > 0:
dc_flat = [item.flatten() for sublist in dc for item in sublist]
plt.figure(figsize=(10, 12))
items = ["conv1", "conv2", "linear1"]
for idx in range(len(items)):
plt.subplot(2, 2, idx + 1)
ks = dc_flat[idx :: len(items)]
alpha = 0.12 / np.log(ks[0].shape[0])
plt.plot(ks, "k.", alpha=0.3)
plt.plot(ks, "k", alpha=alpha)
plt.xticks(np.arange(5), np.arange(1, 6))
plt.ylim((0.0, 0.2))
plt.title(items[idx])
plt.xlabel("Training iteration")
plt.ylabel("Duty cycle")
def plot_entropies(experiment):
for ents in experiment.entropies:
if len(ents[0]) > 0:
ents_flat = [item.flatten() for sublist in ents for item in sublist]
plt.figure(figsize=(10, 12))
items = ["conv1", "conv2", "linear1"]
for idx in range(len(items)):
plt.subplot(2, 2, idx + 1)
ks = ents_flat[idx :: len(items)]
plt.plot(ks, "k.", alpha=0.6)
plt.plot(ks, "k", alpha=0.2)
plt.xticks(np.arange(5), np.arange(1, 6))
plt.title(items[idx])
plt.xlabel("Training iteration")
plt.ylabel("Entropy")
| agpl-3.0 |
laputian/dml | equation/beginner_theano_2.py | 1 | 1849 | import theano
import theano.tensor as T
import theano.tensor.nnet as nnet
import numpy as np
import matplotlib.pyplot as plt
inputs = np.array([0, 1, 2, 3, 4, 5]).reshape(6,1) #training data X
len_inp = inputs.shape[0]
exp_y = np.array([0.0, 0.5, 1, 1.5, 1.0, 1.5])
x = T.dvector()
y = T.dscalar()
def layer(x, w):
b = np.array([1], dtype=theano.config.floatX)
new_x = T.concatenate([x, b])
    m = T.dot(w.T, new_x)  # theta1.T: (len_inp-1)x2 dot [x, 1] -> hidden vector; theta2.T: 1 x len_inp dot [h, 1] -> output
h = 2 * nnet.sigmoid(m)
return h
def grad_desc(cost, theta):
alpha = 0.1 #learning rate
return theta - (alpha * T.grad(cost, wrt=theta))
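# grad_desc performs one vanilla gradient-descent step, theta <- theta - alpha * d(cost)/d(theta);
# T.grad builds the symbolic derivative and the update is applied through the
# `updates` list of the compiled `cost` function below.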
theta1 = theano.shared(np.array(np.random.rand(2,len_inp -1), dtype=theano.config.floatX)) # randomly initialize
theta2 = theano.shared(np.array(np.random.rand(len_inp,1), dtype=theano.config.floatX))
hid1 = layer(x, theta1) #hidden layer
out1 = T.sum(layer(hid1, theta2)) #output layer
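# Network shape: the scalar input plus a bias unit feeds a hidden layer of
# len_inp - 1 sigmoid units, whose activations plus a bias feed a single output
# unit; the factor 2 inside layer() rescales the sigmoid so outputs can span
# the 0..2 range covered by exp_y.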
fc = (out1 - y)**2 #cost expression
cost = theano.function(inputs=[x, y], outputs=fc, updates=[
(theta1, grad_desc(fc, theta1)),
(theta2, grad_desc(fc, theta2))])
cur_cost = 0
for i in range(30000):
for k in range(len(inputs)):
cur_cost = cost(inputs[k], exp_y[k]) #call our Theano-compiled cost function, it will auto update weights
if i % 500 == 0: #only print the cost every 500 epochs/iterations (to save space)
print('Cost: %s' % (cur_cost,))
run_forward = theano.function(inputs=[x], outputs=out1)
marg = 1
inf_ax = np.min(inputs)-marg
sup_ax = np.max(inputs)+marg
interval = np.linspace(inf_ax ,sup_ax ,100)
output=[]
for x in np.nditer(interval):
output.append(run_forward([x]))
plt.axis([inf_ax, sup_ax, np.min(exp_y)-marg, np.max(exp_y) +marg])
plt.plot(interval, np.asarray(output))
plt.plot(inputs, exp_y , marker='o', color='r', linestyle='', label='Expected')
plt.show() | mit |
RomainBrault/scikit-learn | benchmarks/bench_plot_svd.py | 72 | 2914 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
import six
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(six.iteritems(results))):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
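        # Evaluate the classifier's decision function on a regular grid covering the
        # plot area; the view contours the resulting Z to draw the surface.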
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
    """Matplotlib view: draws the data points, support vectors and decision surface."""
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
jimgoo/zipline-fork | zipline/finance/risk/period.py | 1 | 9089 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from six import iteritems
import pandas as pd
from . import risk
from . risk import (
check_entry,
)
from empyrical import (
alpha_beta_aligned,
annual_volatility,
cum_returns,
downside_risk,
information_ratio,
max_drawdown,
sharpe_ratio,
sortino_ratio
)
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns, env,
benchmark_returns=None, algorithm_leverages=None):
self.env = env
treasury_curves = env.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = env.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns,
env)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns,
env)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
#print('-'*100)
#print(self.benchmark_returns.head())
#print(self.algorithm_returns.head())
self.benchmark_period_returns = \
cum_returns(self.benchmark_returns).iloc[-1]
self.algorithm_period_returns = \
cum_returns(self.algorithm_returns).iloc[-1]
# fix the case when the indices don't match
if not self.algorithm_returns.index.equals(self.benchmark_returns.index):
joined = self.algorithm_returns.align(self.benchmark_returns, join='outer')
self.algorithm_returns = joined[0].fillna(method='ffill')
self.benchmark_returns = joined[1].fillna(method='ffill')
if not self.algorithm_returns.index.equals(self.benchmark_returns.index):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
# save for debugging
import pickle
pickle.dump((self.algorithm_returns, self.benchmark_returns), open('/tmp/zp-returns.pkl', 'wb'))
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
## begin empyrical metrics
self.mean_algorithm_returns = (
self.algorithm_returns.cumsum() /
np.arange(1, self.num_trading_days + 1, dtype=np.float64)
)
self.benchmark_volatility = annual_volatility(self.benchmark_returns)
self.algorithm_volatility = annual_volatility(self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date,
self.env,
)
self.sharpe = sharpe_ratio(
self.algorithm_returns,
)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.downside_risk = downside_risk(
self.algorithm_returns.values
)
self.sortino = sortino_ratio(
self.algorithm_returns.values,
_downside_risk=self.downside_risk,
)
self.information = information_ratio(
self.algorithm_returns.values,
self.benchmark_returns.values,
)
self.alpha, self.beta = alpha_beta_aligned(
self.algorithm_returns.values,
self.benchmark_returns.values,
)
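        # Beta is the slope of a regression of algorithm returns on benchmark
        # returns (covariance over benchmark variance) and alpha the corresponding
        # annualized intercept, as computed by empyrical's alpha_beta_aligned.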
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = max_drawdown(self.algorithm_returns.values)
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object of the form:
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
# "algorithm_covariance",
# "benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
# "condition_number",
# "eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns, env):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = env.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
return max(self.algorithm_leverages)
def __getstate__(self):
state_dict = {k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("RiskMetricsPeriod saved state \
is too old.")
self.__dict__.update(state)
| apache-2.0 |
coreymbryant/libmesh | doc/statistics/libmesh_citations.py | 1 | 2369 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Number of "papers using libmesh" by year.
#
# Note 1: this does not count citations "only," the authors must have actually
# used libmesh in part of their work. Therefore, these counts do not include
# things like Wolfgang citing us in his papers to show how Deal.II is
# superior...
#
# Note 2: I typically update this data after regenerating the web page,
# since bibtex2html renumbers the references starting from "1" each year.
#
# Note 3: These citations include anything that is not a dissertation/thesis.
# So, some are conference papers, some are journal articles, etc.
#
# Note 4: The libmesh paper came out in 2006, but there are some citations
# prior to that date, obviously. These counts include citations of the
# website libmesh.sf.net as well...
#
# Note 5: Preprints are listed as the "current year + 1" and are constantly
# being moved to their respective years after being published.
data = [
'2004', 5,
'\'05', 2,
'\'06', 13,
'\'07', 8,
'\'08', 24,
'\'09', 30,
'\'10', 24,
'\'11', 37,
'\'12', 50,
'\'13', 80,
'\'14', 63,
'\'15', 71,
'\'16', 44,
'P', 10, # Preprints
'T', 51 # Theses
]
# Extract the x-axis labels from the data array
xlabels = data[0::2]
# Extract the publication counts from the data array
n_papers = data[1::2]
# The number of data points
N = len(xlabels)
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Create an x-axis for plotting
x = np.linspace(1, N, N)
# Width of the bars
width = 0.8
# Make the bar chart. Plot years in blue, preprints and theses in green.
ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b')
ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g')
# Label the x-axis
plt.xlabel('P=Preprints, T=Theses')
# Set up the xtick locations and labels. Note that you have to offset
# the position of the ticks by width/2, where width is the width of
# the bars.
ax.set_xticks(np.linspace(1,N,N) + width/2)
ax.set_xticklabels(xlabels)
# Create a title string
title_string = 'Papers by People Using LibMesh, (' + str(sum(n_papers)) + ' Total)'
fig.suptitle(title_string)
# Save as PDF
plt.savefig('libmesh_citations.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
Barmaley-exe/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
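# Instead of calling predict, each estimator is scored with its decision_function
# and the lowest `outliers_fraction` quantile of scores is labelled as outliers,
# so both methods flag exactly the assumed contamination fraction.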
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
wateraccounting/wa | Generator/Sheet4/main.py | 1 | 13145 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Function/Four
"""
# import general python modules
import os
import numpy as np
import pandas as pd
from netCDF4 import Dataset
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, ETref_Product, Runoff_Product, Startdate, Enddate, Simulation):
"""
    This function is the main framework for calculating sheet 4.
Parameters
----------
Basin : str
Name of the basin
P_Product : str
Name of the rainfall product that will be used
ET_Product : str
Name of the evapotranspiration product that will be used
LAI_Product : str
Name of the LAI product that will be used
Runoff_Product : str
Name of the Runoff product that will be used
    ETref_Product : str
        Name of the reference evapotranspiration product that will be used
Startdate : str
Contains the start date of the model 'yyyy-mm-dd'
Enddate : str
Contains the end date of the model 'yyyy-mm-dd'
Simulation : int
Defines the simulation
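    Examples
    --------
    Illustrative call only; the basin and product names are placeholders, not
    values guaranteed to ship with the toolbox::

        Calculate(WA_HOME_folder='', Basin='MyBasin', P_Product='CHIRPS',
                  ET_Product='ETensV1_0', LAI_Product='MOD15', ETref_Product='WA_ETref',
                  Runoff_Product='SR', Startdate='2010-01-01', Enddate='2012-12-31',
                  Simulation=1)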
"""
######################### Import WA modules ###################################
from wa.General import raster_conversions as RC
from wa.General import data_conversions as DC
import wa.Functions.Four as Four
import wa.Functions.Start as Start
import wa.Generator.Sheet4 as Generate
import wa.Functions.Start.Get_Dictionaries as GD
######################### Set General Parameters ##############################
# Get environmental variable for the Home folder
if WA_HOME_folder == '':
WA_env_paths = os.environ["WA_HOME"].split(';')
Dir_Home = WA_env_paths[0]
else:
Dir_Home = WA_HOME_folder
# Create the Basin folder
Dir_Basin = os.path.join(Dir_Home, Basin)
output_dir = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Get the boundaries of the basin based on the shapefile of the watershed
# Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)
# Find the maximum moving window value
ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())
############## Cut dates into pieces if it is needed ######################
# Check the years that needs to be calculated
years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)
for year in years:
# Create .nc file if not exists
nc_outname = os.path.join(output_dir, "%d.nc" % year)
if not os.path.exists(nc_outname):
DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)
# Open variables in netcdf
fh = Dataset(nc_outname)
Variables_NC = [var for var in fh.variables]
fh.close()
# Create Start and End date for time chunk
Startdate_part = '%d-01-01' %int(year)
Enddate_part = '%s-12-31' %int(year)
if int(year) == int(years[0]):
Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)
Startdate_Moving_Average_String = Startdate_Moving_Average.strftime('%Y-%m-%d')
else:
Startdate_Moving_Average_String = Startdate_part
############################# Download Data ###################################
# Download data
if not "Precipitation" in Variables_NC:
Data_Path_P_Monthly = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, P_Product)
if not "Actual_Evapotranspiration" in Variables_NC:
Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, ET_Product)
if not "Reference_Evapotranspiration" in Variables_NC:
Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_part, ETref_Product)
if not "Grey_Water_Footprint" in Variables_NC:
Data_Path_GWF = Start.Download_Data.GWF(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']])
if not "Theta_Saturated_Topsoil" in Variables_NC:
Data_Path_ThetaSat_topsoil = Start.Download_Data.Soil_Properties(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Para = 'ThetaSat_TopSoil')
###################### Save Data as netCDF files ##############################
#______________________________Precipitation_______________________________
# 1.) Precipitation data
if not "Precipitation" in Variables_NC:
# Get the data of Precipitation and save as nc
DataCube_Prec = RC.Get3Darray_time_series_monthly(Data_Path_P_Monthly, Startdate_part, Enddate_part, Example_data = Example_dataset)
DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec, "Precipitation", "mm/month", 0.01)
del DataCube_Prec
#_______________________Reference Evaporation______________________________
# 2.) Reference Evapotranspiration data
if not "Reference_Evapotranspiration" in Variables_NC:
# Get the data of Reference Evapotranspiration and save as nc
DataCube_ETref = RC.Get3Darray_time_series_monthly(Data_Path_ETref, Startdate_part, Enddate_part, Example_data = Example_dataset)
DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref, "Reference_Evapotranspiration", "mm/month", 0.01)
del DataCube_ETref
#_______________________________Evaporation________________________________
# 3.) Evapotranspiration data
if not "Actual_Evapotranspiration" in Variables_NC:
# Get the data of Actual Evapotranspiration and save as nc
DataCube_ET = RC.Get3Darray_time_series_monthly(Data_Path_ET, Startdate_part, Enddate_part, Example_data = Example_dataset)
DC.Add_NC_Array_Variable(nc_outname, DataCube_ET, "Actual_Evapotranspiration", "mm/month", 0.01)
del DataCube_ET
#_____________________________________GWF__________________________________
# 4.) Grey Water Footprint data
if not "Grey_Water_Footprint" in Variables_NC:
# Get the data of grey water footprint and save as nc
GWF_Filepath = os.path.join(Dir_Basin, Data_Path_GWF, "Gray_Water_Footprint_Fraction.tif")
dest_GWF = RC.reproject_dataset_example(GWF_Filepath, Example_dataset, method=1)
DataCube_GWF = dest_GWF.GetRasterBand(1).ReadAsArray()
DC.Add_NC_Array_Static(nc_outname, DataCube_GWF, "Grey_Water_Footprint", "fraction", 0.0001)
del DataCube_GWF
####################### Calculations Sheet 4 ##############################
############## Cut dates into pieces if it is needed ######################
years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)
for year in years:
if len(years) > 1:
# The first and last year use the requested start/end dates; the years in
# between cover the full calendar year
Startdate_part = Startdate if year == years[0] else '%s-01-01' %year
Enddate_part = Enddate if year == years[-1] else '%s-12-31' %year
else:
Startdate_part = Startdate
Enddate_part = Enddate
#____________ Evapotranspiration data split into ETblue and ETgreen ____________
if not ("Blue_Evapotranspiration" in Variables_NC or "Green_Evapotranspiration" in Variables_NC):
# Calculate Blue and Green ET
DataCube_ETblue, DataCube_ETgreen = Four.SplitET.Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate)
DC.Add_NC_Array_Variable(nc_outname, DataCube_ETblue, "Blue_Evapotranspiration", "mm/month", 0.01)
DC.Add_NC_Array_Variable(nc_outname, DataCube_ETgreen, "Green_Evapotranspiration", "mm/month", 0.01)
del DataCube_ETblue, DataCube_ETgreen
#____________ Calculate non-consumed and total supply maps by using fractions and consumed maps (blue ET) ____________
if not ("Total_Supply" in Variables_NC or "Non_Consumed_Water" in Variables_NC):
# Do the calculations
DataCube_Total_Supply, DataCube_Non_Consumed = Four.Total_Supply.Fraction_Based(nc_outname, Startdate_part, Enddate_part)
# Save the Total Supply and non consumed data as NetCDF files
DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply, "Total_Supply", "mm/month", 0.01)
DC.Add_NC_Array_Variable(nc_outname, DataCube_Non_Consumed, "Non_Consumed_Water", "mm/month", 0.01)
del DataCube_Total_Supply, DataCube_Non_Consumed
#____________ Apply fractions over total supply to calculate gw and sw supply ____________
if not ("Total_Supply_Surface_Water" in Variables_NC or "Total_Supply_Ground_Water" in Variables_NC):
# Do the calculations
DataCube_Total_Supply_SW, DataCube_Total_Supply_GW = Four.SplitGW_SW_Supply.Fraction_Based(nc_outname, Startdate_part, Enddate_part)
# Save the Total Supply surface water and Total Supply ground water data as NetCDF files
DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply_SW, "Total_Supply_Surface_Water", "mm/month", 0.01)
DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply_GW, "Total_Supply_Ground_Water", "mm/month", 0.01)
del DataCube_Total_Supply_SW, DataCube_Total_Supply_GW
#____________ Apply grey water footprint fractions to calculate the non-recoverable flow based on the non-consumed flow ____________
if not ("Non_Recovable_Flow" in Variables_NC or "Recovable_Flow" in Variables_NC):
# Calculate the non-recoverable flow and recoverable flow by using Grey Water Footprint values
DataCube_NonRecovableFlow, Datacube_RecovableFlow = Four.SplitNonConsumed_NonRecov.GWF_Based(nc_outname, Startdate_part, Enddate_part)
# Save the non-recoverable and recoverable flow as nc
DC.Add_NC_Array_Variable(nc_outname, DataCube_NonRecovableFlow, "Non_Recovable_Flow", "mm/month", 0.01)
DC.Add_NC_Array_Variable(nc_outname, Datacube_RecovableFlow, "Recovable_Flow", "mm/month", 0.01)
del DataCube_NonRecovableFlow, Datacube_RecovableFlow
#____________ Apply fractions to calculate the non-recoverable SW/GW and recoverable SW/GW ____________
# 1. Non-recoverable flow
if not ("Non_Recovable_Flow_Ground_Water" in Variables_NC or "Non_Recovable_Flow_Surface_Water" in Variables_NC):
# Calculate the non-recoverable return flow to ground and surface water
DataCube_NonRecovableFlow_Return_GW, Datacube_NonRecovableFlow_Return_SW = Four.SplitGW_SW_Return.Fraction_Based(nc_outname, "Non_Recovable_Flow", Startdate_part, Enddate_part)
# Save the non-recoverable return flows to ground and surface water as nc
DC.Add_NC_Array_Variable(nc_outname, DataCube_NonRecovableFlow_Return_GW, "Non_Recovable_Flow_Ground_Water", "mm/month", 0.01)
DC.Add_NC_Array_Variable(nc_outname, Datacube_NonRecovableFlow_Return_SW, "Non_Recovable_Flow_Surface_Water", "mm/month", 0.01)
del DataCube_NonRecovableFlow_Return_GW, Datacube_NonRecovableFlow_Return_SW
# 2. Recoverable flow
if not ("Recovable_Flow_Ground_Water" in Variables_NC or "Recovable_Flow_Surface_Water" in Variables_NC):
# Calculate the recoverable return flow to ground and surface water
DataCube_RecovableFlow_Return_GW, Datacube_RecovableFlow_Return_SW = Four.SplitGW_SW_Return.Fraction_Based(nc_outname, "Recovable_Flow", Startdate_part, Enddate_part)
# Save the recoverable return flows to ground and surface water as nc
DC.Add_NC_Array_Variable(nc_outname, DataCube_RecovableFlow_Return_GW, "Recovable_Flow_Ground_Water", "mm/month", 0.01)
DC.Add_NC_Array_Variable(nc_outname, Datacube_RecovableFlow_Return_SW, "Recovable_Flow_Surface_Water", "mm/month", 0.01)
del DataCube_RecovableFlow_Return_GW, Datacube_RecovableFlow_Return_SW
############################ Create CSV 4 #################################
Dir_Basin_CSV, Unit_front = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate_part, Enddate_part, nc_outname)
############################ Create Sheet 4 ###############################
Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV, Unit_front)
return()
| apache-2.0 |
rishikksh20/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 21 | 4784 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying :class:`sklearn.decomposition.NMF`
and :class:`sklearn.decomposition.LatentDirichletAllocation` on a corpus of documents and
extracting additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
Non-negative Matrix Factorization is applied with two different objective
functions: the Frobenius norm, and the generalized Kullback-Leibler divergence.
The latter is equivalent to Probabilistic Latent Semantic Indexing.
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
message = "Topic #%d: " % topic_idx
message += " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
print(message)
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print()
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
# Fit the NMF model
print("Fitting the NMF model (generalized Kullback-Leibler divergence) with "
"tf-idf features, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1, beta_loss='kullback-leibler',
solver='mu', max_iter=1000, alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (generalized Kullback-Leibler divergence):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
giorgiop/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 84 | 1221 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machine classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
NelisVerhoef/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
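# (BernoulliNB binarizes its input -- by default at threshold 0.0 -- so a
# count of 100 behaves like 1, whereas MultinomialNB weights each feature
# by its count.)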
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check that sample weights are handled correctly in the multiclass case
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
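# With alpha = 1, P(feature | class) = (class docs containing feature + 1)
# / (docs in class + 2), e.g. P(Beijing | China) = (1 + 1) / (3 + 2) = 0.4
# and P(Chinese | Japan) = (1 + 1) / (1 + 2) = 2/3.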
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
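# These unnormalized values follow from the priors and feature
# probabilities above (present features contribute P(f|c), absent ones
# 1 - P(f|c)):
# China: 0.75 * 0.8 * 0.2 * 0.2 * 0.6 * 0.6 * 0.6 = 0.005184
# Japan: 0.25 * (2/3)**6 = 0.0219478...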
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
yyjiang/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
rmcgibbo/scipy | scipy/stats/_binned_statistic.py | 17 | 17622 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable
from collections import namedtuple
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : array_like
A sequence of values to be binned.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than the lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First a basic example:
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled', alpha=0.2,
... label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, xy = binned_statistic_dd([x], values, statistic,
bins, range)
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
return BinnedStatisticResult(medians, edges[0], xy)
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin
x_edges : (nx + 1) ndarray
The bin edges along the first dimension.
y_edges : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as `values`.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
.. versionadded:: 0.11.0
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
bins, range)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
return BinnedStatistic2dResult(medians, edges[0], edges[1], xy)
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as x.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
np.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D * [None]
dedges = D * [None]
try:
M = len(bins)
if M != D:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(D)
smax = np.zeros(D)
for i in np.arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in np.arange(D):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(D):
Ncount[i] = np.digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in np.arange(D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal)
== np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
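# For example, np.digitize([4.], [1., 2., 3., 4.]) returns [4], i.e. the
# outlier bin; the shift above moves such samples back into the last
# regular bin.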
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
xy = np.zeros(N, int)
for i in np.arange(0, D - 1):
xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
xy += Ncount[ni[-1]]
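# xy now holds, for every sample, a single linear index into the flattened
# result array (dimensions taken in the order given by ni); the reshape and
# swapaxes steps below restore the original dimension order.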
result = np.empty(nbin.prod(), float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
a = flatcount.nonzero()
result[a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
flatsum2 = np.bincount(xy, values ** 2)
a = flatcount.nonzero()
result[a] = np.sqrt(flatsum2[a] / flatcount[a]
- (flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(xy, None)
a = np.arange(len(flatcount))
result[a] = flatcount
elif statistic == 'sum':
result.fill(0)
flatsum = np.bincount(xy, values)
a = np.arange(len(flatsum))
result[a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(xy):
result[i] = np.median(values[xy == i])
elif callable(statistic):
with warnings.catch_warnings():
# Numpy generates a warnings for mean/std/... with empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(xy):
result[i] = statistic(values[xy == i])
# Shape into a proper matrix
result = result.reshape(np.sort(nbin))
for i in np.arange(nbin.size):
j = ni.argsort()[i]
result = result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D * [slice(1, -1)]
result = result[core]
if (result.shape != nbin - 2).any():
raise RuntimeError('Internal Shape Error')
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
return BinnedStatisticddResult(result, edges, xy)
| bsd-3-clause |
rvraghav93/scikit-learn | examples/svm/plot_svm_margin.py | 88 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
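# (Two parallel lines y = a*x + b and y = a*x + b' are |b - b'| / sqrt(1 + a^2)
# apart, so a perpendicular offset of `margin` corresponds to a vertical
# offset of margin * sqrt(1 + a^2).)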
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
festeh/BuildingMachineLearningSystemsWithPython | ch06/utils.py | 22 | 6937 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import sys
import collections
import csv
import json
from matplotlib import pylab
import numpy as np
DATA_DIR = "data"
CHART_DIR = "charts"
if not os.path.exists(DATA_DIR):
raise RuntimeError("Expecting directory 'data' in current path")
if not os.path.exists(CHART_DIR):
os.mkdir(CHART_DIR)
def tweak_labels(Y, pos_sent_list):
pos = Y == pos_sent_list[0]
for sent_label in pos_sent_list[1:]:
pos |= Y == sent_label
Y = np.zeros(Y.shape[0])
Y[pos] = 1
Y = Y.astype(int)
return Y
def load_sanders_data(dirname=".", line_count=-1):
count = 0
topics = []
labels = []
tweets = []
with open(os.path.join(DATA_DIR, dirname, "corpus.csv"), "r") as csvfile:
metareader = csv.reader(csvfile, delimiter=',', quotechar='"')
for line in metareader:
count += 1
if line_count > 0 and count > line_count:
break
topic, label, tweet_id = line
tweet_fn = os.path.join(
DATA_DIR, dirname, 'rawdata', '%s.json' % tweet_id)
try:
tweet = json.load(open(tweet_fn, "r"))
except IOError:
print(("Tweet '%s' not found. Skip." % tweet_fn))
continue
if 'text' in tweet and tweet['user']['lang'] == "en":
topics.append(topic)
labels.append(label)
tweets.append(tweet['text'])
tweets = np.asarray(tweets)
labels = np.asarray(labels)
return tweets, labels
def plot_pr(auc_score, name, phase, precision, recall, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.fill_between(recall, precision, alpha=0.5)
pylab.plot(recall, precision, lw=1)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R curve (AUC=%0.2f) / %s' % (auc_score, label))
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(CHART_DIR, "pr_%s_%s.png" %
(filename, phase)), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
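# c_f is sorted by coefficient, so c_f[:n] are the n most negative
# features and c_f[:-(n + 1):-1] the n most positive (largest first)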
top = list(zip(c_f[:n], c_f[:-(n + 1):-1]))
for (c1, f1), (c2, f2) in top:
print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_log():
pylab.clf()
pylab.figure(num=None, figsize=(6, 5))
x = np.arange(0.001, 1, 0.001)
y = np.log(x)
pylab.title('Relationship between probabilities and their logarithm')
pylab.plot(x, y)
pylab.grid(True)
pylab.xlabel('P')
pylab.ylabel('log(P)')
filename = 'log_probs.png'
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_importance(feature_names, clf, name):
pylab.clf()
coef_ = clf.coef_
important = np.argsort(np.absolute(coef_.ravel()))
f_imp = feature_names[important]
coef = coef_.ravel()[important]
inds = np.argsort(coef)
f_imp = f_imp[inds]
coef = coef[inds]
xpos = np.array(list(range(len(coef))))
pylab.bar(xpos, coef, width=1)
pylab.title('Feature importance for %s' % (name))
ax = pylab.gca()
ax.set_xticks(np.arange(len(coef)))
labels = ax.set_xticklabels(f_imp)
for label in labels:
label.set_rotation(90)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(
CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
pylab.clf()
num_rows = 1 + (len(data_name_list) - 1) // 2
num_cols = 1 if len(data_name_list) == 1 else 2
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Density')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
bins = max_val
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, facecolor='green', alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
pylab.clf()
pylab.ylim([0.0, 1.0])
pylab.xlabel('Data set size')
pylab.ylabel('Error')
pylab.title("Bias-Variance for '%s'" % name)
pylab.plot(
data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
pylab.legend(["train error", "test error"], loc="upper right")
pylab.grid()
pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
def load_sent_word_net():
sent_scores = collections.defaultdict(list)
sentiwordnet_path = os.path.join(DATA_DIR, "SentiWordNet_3.0.0_20130122.txt")
if not os.path.exists(sentiwordnet_path):
print("Please download SentiWordNet_3.0.0 from http://sentiwordnet.isti.cnr.it/download.php, extract it and put it into the data directory")
sys.exit(1)
with open(sentiwordnet_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='"')
for line in reader:
if line[0].startswith("#"):
continue
if len(line) == 1:
continue
POS, ID, PosScore, NegScore, SynsetTerms, Gloss = line
if len(POS) == 0 or len(ID) == 0:
continue
# print POS,PosScore,NegScore,SynsetTerms
for term in SynsetTerms.split(" "):
# drop #number at the end of every term
term = term.split("#")[0]
term = term.replace("-", " ").replace("_", " ")
key = "%s/%s" % (POS, term.split("#")[0])
sent_scores[key].append((float(PosScore), float(NegScore)))
for key, value in sent_scores.items():
sent_scores[key] = np.mean(value, axis=0)
return sent_scores
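# Hedged usage sketch (not part of the original utilities): keys returned by
# load_sent_word_net() look like "<POS>/<term>" and map to the mean
# (PosScore, NegScore) pair; the "a/good" key below is an assumption about
# the SentiWordNet file contents.
def _example_load_sent_word_net():
    sent_scores = load_sent_word_net()
    pos_score, neg_score = sent_scores.get("a/good", (0.0, 0.0))
    print("good (adjective): pos=%.3f neg=%.3f" % (pos_score, neg_score))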
def log_false_positives(clf, X, y, name):
with open("FP_" + name.replace(" ", "_") + ".tsv", "w") as f:
false_positive = clf.predict(X) != y
for tweet, false_class in zip(X[false_positive], y[false_positive]):
f.write("%s\t%s\n" %
(false_class, tweet.encode("ascii", "ignore")))
if __name__ == '__main__':
plot_log()
| mit |
phdowling/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps base on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
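# Hedged usage sketch (not part of the original module): map data with
# RBFSampler and check that inner products of the mapped features roughly
# match the exact RBF kernel. gamma and n_components are arbitrary choices.
def _example_rbf_sampler():
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(0)
    X = rng.rand(50, 10)
    sampler = RBFSampler(gamma=0.5, n_components=500, random_state=0)
    X_features = sampler.fit_transform(X)
    K_approx = np.dot(X_features, X_features.T)
    K_exact = rbf_kernel(X, X, gamma=0.5)
    print("mean absolute error: %.4f" % np.mean(np.abs(K_approx - K_exact)))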
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling the fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
                shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
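# Hedged usage sketch (not part of the original module): expand non-negative
# count-like features with AdditiveChi2Sampler. With sample_steps=2 each of
# the 6 input features becomes 2*2-1 = 3 output features; the random data is
# purely illustrative.
def _example_additive_chi2_sampler():
    rng = np.random.RandomState(0)
    X = rng.randint(0, 10, size=(20, 6)).astype(float)
    chi2_feat = AdditiveChi2Sampler(sample_steps=2)
    X_transformed = chi2_feat.fit_transform(X)
    print(X_transformed.shape)  # expected: (20, 18)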
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
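# Hedged usage sketch (not part of the original module): use Nystroem to build
# an approximate RBF feature map and feed it to a linear classifier. The
# digits dataset, LinearSVC and the gamma/n_components values are
# illustrative assumptions.
def _example_nystroem():
    from sklearn.datasets import load_digits
    from sklearn.svm import LinearSVC
    digits = load_digits()
    X, y = digits.data / 16., digits.target
    feature_map = Nystroem(kernel="rbf", gamma=0.2, n_components=300,
                           random_state=0)
    X_features = feature_map.fit_transform(X)
    clf = LinearSVC().fit(X_features, y)
    print("training accuracy: %.3f" % clf.score(X_features, y))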
| bsd-3-clause |
Sentient07/scikit-learn | benchmarks/bench_sgd_regression.py | 61 | 5612 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
plt.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("RMSE")
plt.title("Test error - %d features" % list_n_features[j])
i += 1
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("Time [sec]")
plt.title("Training time - %d features" % list_n_features[j])
i += 1
plt.subplots_adjust(hspace=.30)
plt.show()
| bsd-3-clause |
jaeilepp/mne-python | mne/decoding/search_light.py | 1 | 24632 | # Author: Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from .mixin import TransformerMixin
from .base import BaseEstimator, _check_estimator
from ..parallel import parallel_func
class SlidingEstimator(BaseEstimator, TransformerMixin):
"""Search Light.
Fit, predict and score a series of models to each subset of the dataset
along the last dimension. Each entry in the last dimension is referred
to as a task.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
scoring : callable, string, defaults to None
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
Note that the predict_method is automatically identified if scoring is
a string (e.g. scoring="roc_auc" calls predict_proba) but is not
automatically set if scoring is a callable (e.g.
scoring=sklearn.metrics.roc_auc_score).
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
Attributes
----------
``estimators_`` : array-like, shape (n_tasks,)
List of fitted scikit-learn estimators (one per task).
"""
def __init__(self, base_estimator, scoring=None, n_jobs=1): # noqa: D102
_check_estimator(base_estimator)
self.base_estimator = base_estimator
self.n_jobs = n_jobs
self.scoring = scoring
if not isinstance(self.n_jobs, int):
raise ValueError('n_jobs must be int, got %s' % n_jobs)
def __repr__(self): # noqa: D105
repr_str = '<' + super(SlidingEstimator, self).__repr__()
if hasattr(self, 'estimators_'):
repr_str = repr_str[:-1]
repr_str += ', fitted with %i estimators' % len(self.estimators_)
return repr_str + '>'
def fit(self, X, y):
"""Fit a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each data slice, a clone estimator
is fitted independently. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
self : object
Return self.
"""
self._check_Xy(X, y)
self.estimators_ = list()
# For fitting, the parallelization is across estimators.
parallel, p_func, n_jobs = parallel_func(_sl_fit, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
estimators = parallel(
p_func(self.base_estimator, split, y)
for split in np.array_split(X, n_jobs, axis=-1))
self.estimators_ = np.concatenate(estimators, 0)
return self
def fit_transform(self, X, y):
"""Fit and transform a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each task, a clone estimator
is fitted independently. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
y_pred : array, shape (n_samples, n_tasks) | (n_samples, n_tasks, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self.fit(X, y).transform(X)
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(_sl_transform, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
est_splits = np.array_split(self.estimators_, n_jobs)
y_pred = parallel(p_func(est, x, method)
for (est, x) in zip(est_splits, X_splits))
y_pred = np.concatenate(y_pred, axis=1)
return y_pred
def transform(self, X):
"""Transform each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice/task, the corresponding
estimator makes a transformation of the data, e.g.
``[estimators[ii].transform(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
Returns
-------
Xt : array, shape (n_samples, n_estimators)
The transformed values generated by each estimator.
""" # noqa: E501
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample predictions, e.g.:
``[estimators[ii].predict(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
Returns
-------
y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets)
Predicted values for each estimator/data slice.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Predict each data slice with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample probabilistic predictions, e.g.:
``[estimators[ii].predict_proba(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
Returns
-------
y_pred : array, shape (n_samples, n_tasks, n_classes)
Predicted probabilities for each estimator/data slice/task.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to the hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
outputs the distance to the hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
Predicted distances for each estimator/data slice.
Notes
-----
This requires base_estimator to have a ``decision_function`` method.
""" # noqa: E501
return self._transform(X, 'decision_function')
def _check_Xy(self, X, y=None):
"""Aux. function to check input data."""
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def score(self, X, y):
"""Score each estimator on each task.
The number of tasks in X should match the number of tasks/estimators
given at fit time, i.e. we need
``X.shape[-1] == len(self.estimators_)``.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_tasks,)
Score for each estimator/task.
""" # noqa: E501
from sklearn.metrics.scorer import check_scoring
self._check_Xy(X)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
est_splits = np.array_split(self.estimators_, n_jobs)
score = parallel(p_func(est, scoring, x, y)
for (est, x) in zip(est_splits, X_splits))
score = np.concatenate(score, axis=0)
return score
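# Hedged usage sketch (not part of the original module): fit one classifier
# per task (e.g. one per time point) on synthetic data of shape
# (n_samples, n_features, n_tasks). LogisticRegression and the random data
# are illustrative assumptions.
def _example_sliding_estimator():
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5, 10)   # 100 samples, 5 features, 10 tasks
    y = rng.randint(0, 2, 100)  # binary targets
    sl = SlidingEstimator(LogisticRegression(), scoring='roc_auc', n_jobs=1)
    sl.fit(X, y)
    scores = sl.score(X, y)     # one score per task
    print(scores.shape)         # expected: (10,)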
def _sl_fit(estimator, X, y):
"""Aux. function to fit SlidingEstimator in parallel.
Fit a clone estimator to each slice of data.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_sample, )
The target values.
Returns
-------
estimators_ : list of estimators
The fitted estimators.
"""
from sklearn.base import clone
estimators_ = list()
for ii in range(X.shape[-1]):
est = clone(estimator)
est.fit(X[..., ii], y)
estimators_.append(est)
return estimators_
def _sl_transform(estimators, X, method):
"""Aux. function to transform SlidingEstimator in parallel.
Applies transform/predict/decision_function etc for each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
method : str
The estimator method to use (e.g. 'predict', 'transform').
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
The transformations for each slice of data.
""" # noqa: E501
for ii, est in enumerate(estimators):
transform = getattr(est, method)
_y_pred = transform(X[..., ii])
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _sl_init_pred(_y_pred, X)
y_pred[:, ii, ...] = _y_pred
return y_pred
def _sl_init_pred(y_pred, X):
"""Aux. function to SlidingEstimator to initialize y_pred."""
n_sample, n_tasks = X.shape[0], X.shape[-1]
y_pred = np.zeros((n_sample, n_tasks) + y_pred.shape[1:], y_pred.dtype)
return y_pred
def _sl_score(estimators, scoring, X, y):
"""Aux. function to score SlidingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list, shape (n_tasks,)
The fitted estimators.
X : array, shape (n_samples, nd_features, n_tasks)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
scoring : callable, string or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
predictions to pass them to ad-hoc scorer.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_tasks,)
The score for each task / slice of data.
"""
n_tasks = X.shape[-1]
score = np.zeros(n_tasks)
for ii, est in enumerate(estimators):
score[ii] = scoring(est, X[..., ii], y)
return score
def _check_method(estimator, method):
"""Check that an estimator has the method attribute.
If method == 'transform' and estimator does not have 'transform', use
'predict' instead.
"""
if method == 'transform' and not hasattr(estimator, 'transform'):
method = 'predict'
if not hasattr(estimator, method):
        raise ValueError('base_estimator does not have `%s` method.' % method)
return method
class GeneralizingEstimator(SlidingEstimator):
"""Generalization Light.
Fit a search-light along the last dimension and use them to apply a
systematic cross-tasks generalization.
Parameters
----------
base_estimator : object
The base estimator to iteratively fit on a subset of the dataset.
scoring : callable | string | None
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
Note that the predict_method is automatically identified if scoring is
a string (e.g. scoring="roc_auc" calls predict_proba) but is not
automatically set if scoring is a callable (e.g.
scoring=sklearn.metrics.roc_auc_score).
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
"""
def __repr__(self): # noqa: D105
repr_str = super(GeneralizingEstimator, self).__repr__()
if hasattr(self, 'estimators_'):
repr_str = repr_str[:-1]
repr_str += ', fitted with %i estimators>' % len(self.estimators_)
return repr_str
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
parallel, p_func, n_jobs = parallel_func(_gl_transform, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
y_pred = parallel(
p_func(self.estimators_, x_split, method)
for x_split in np.array_split(X, n_jobs, axis=-1))
y_pred = np.concatenate(y_pred, axis=2)
return y_pred
def transform(self, X):
"""Transform each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
            The input samples. For each estimator the corresponding data slice is
used to make a transformation. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
Xt : array, shape (n_samples, n_estimators, n_slices)
The transformed values generated by each estimator.
"""
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a fitted estimator
predicts each slice of the data independently. The feature
dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Estimate probabilistic estimates of each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a fitted estimator
predicts a slice of the data. The feature dimension can be
multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes)
The predicted values for each estimator.
Notes
-----
This requires base_estimator to have a `predict_proba` method.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to all hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. Each estimator outputs the distance to
its hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes * (n_classes-1) // 2)
The predicted values for each estimator.
Notes
-----
This requires base_estimator to have a ``decision_function`` method.
""" # noqa: E501
return self._transform(X, 'decision_function')
def score(self, X, y):
"""Score each of the estimators on the tested dimensions.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_slices)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_estimators, n_slices)
Score for each estimator / data slice couple.
""" # noqa: E501
from sklearn.metrics.scorer import check_scoring
self._check_Xy(X)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(_gl_score, self.n_jobs)
n_jobs = min(n_jobs, X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
score = parallel(p_func(self.estimators_, scoring, x, y)
for x in X_splits)
score = np.concatenate(score, axis=1)
return score
def _gl_transform(estimators, X, method):
"""Transform the dataset.
This will apply each estimator to all slices of the data.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. For each data slice, a clone estimator
is fitted independently. The feature dimension can be multidimensional
e.g. X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
Xt : array, shape (n_samples, n_slices)
The transformed values generated by each estimator.
"""
n_sample, n_iter = X.shape[0], X.shape[-1]
for ii, est in enumerate(estimators):
# stack generalized data for faster prediction
X_stack = X.transpose(np.r_[0, X.ndim - 1, range(1, X.ndim - 1)])
X_stack = X_stack.reshape(np.r_[n_sample * n_iter, X_stack.shape[2:]])
transform = getattr(est, method)
_y_pred = transform(X_stack)
# unstack generalizations
if _y_pred.ndim == 2:
_y_pred = np.reshape(_y_pred, [n_sample, n_iter, _y_pred.shape[1]])
else:
shape = np.r_[n_sample, n_iter, _y_pred.shape[1:]].astype(int)
_y_pred = np.reshape(_y_pred, shape)
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _gl_init_pred(_y_pred, X, len(estimators))
y_pred[:, ii, ...] = _y_pred
return y_pred
def _gl_init_pred(y_pred, X, n_train):
"""Aux. function to GeneralizingEstimator to initialize y_pred."""
n_sample, n_iter = X.shape[0], X.shape[-1]
if y_pred.ndim == 3:
y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]),
y_pred.dtype)
else:
y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype)
return y_pred
def _gl_score(estimators, scoring, X, y):
"""Score GeneralizingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
scoring : callable, string or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
predictions to pass them to ad-hoc scorer.
X : array, shape (n_samples, nd_features, n_slices)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_estimators, n_slices)
The score for each slice of data.
"""
    # FIXME: The level of parallelization may be a bit high, and might be memory
# consuming. Perhaps need to lower it down to the loop across X slices.
score_shape = [len(estimators), X.shape[-1]]
for ii, est in enumerate(estimators):
for jj in range(X.shape[-1]):
_score = scoring(est, X[..., jj], y)
# Initialize array of predictions on the first score iteration
if (ii == 0) & (jj == 0):
dtype = type(_score)
score = np.zeros(score_shape, dtype)
score[ii, jj, ...] = _score
return score
def _fix_auc(scoring, y):
from sklearn.preprocessing import LabelEncoder
# This fixes sklearn's inability to compute roc_auc when y not in [0, 1]
# scikit-learn/scikit-learn#6874
if scoring is not None:
if (
hasattr(scoring, '_score_func') and
hasattr(scoring._score_func, '__name__') and
scoring._score_func.__name__ == 'roc_auc_score'
):
if np.ndim(y) != 1 or len(set(y)) != 2:
raise ValueError('roc_auc scoring can only be computed for '
'two-class problems.')
y = LabelEncoder().fit_transform(y)
return y
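# Hedged usage sketch (not part of the original module): train one estimator
# per task and evaluate every estimator on every task (temporal
# generalization). The base estimator and the random data are illustrative
# assumptions.
def _example_generalizing_estimator():
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(42)
    X = rng.randn(80, 4, 6)     # 80 samples, 4 features, 6 tasks
    y = rng.randint(0, 2, 80)
    gl = GeneralizingEstimator(LogisticRegression(), scoring='accuracy')
    gl.fit(X, y)
    scores = gl.score(X, y)     # estimators x tasks matrix
    print(scores.shape)         # expected: (6, 6)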
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
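# Hedged usage sketch (not part of the original module): standardize a small
# array with StandardScaler and rescale it to [0, 1] with MinMaxScaler; the
# data is arbitrary.
def _example_scalers():
    import numpy as np
    data = np.array([[1., 2.], [3., 6.], [5., 10.]])
    print(StandardScaler().fit_transform(data))  # zero mean, unit variance
    print(MinMaxScaler().fit_transform(data))    # rescaled to [0, 1]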
| bsd-3-clause |
rexshihaoren/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
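# Hedged usage sketch (not part of the original module): the contingency
# matrix counts how samples of each true class fall into each predicted
# cluster; the label vectors below are arbitrary.
def _example_contingency_matrix():
    labels_true = [0, 0, 1, 1, 2, 2]
    labels_pred = [0, 0, 1, 2, 2, 2]
    print(contingency_matrix(labels_true, labels_pred))
    # expected output:
    # [[2 0 0]
    #  [0 1 1]
    #  [0 0 2]]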
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even when the labels are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but un-necessary splits harms completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
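# --- illustrative sketch (not part of the original module) -------------------
# A minimal, hand-rolled check of the MI definition given above, computed on a
# tiny contingency table with plain numpy. The helper name is hypothetical and
# is never called by the library code.
def _mi_from_contingency_example():
    """Return MI(U, V) = sum_ij P(i, j) * log(P(i, j) / (P(i) * P'(j)))."""
    contingency = np.array([[2., 0.],
                            [0., 2.]])        # two labelings that agree perfectly
    joint = contingency / contingency.sum()   # P(i, j)
    pi = joint.sum(axis=1)                    # P(i)
    pj = joint.sum(axis=0)                    # P'(j)
    outer = np.outer(pi, pj)                  # P(i) * P'(j)
    nonzero = joint > 0                       # convention: 0 * log(0) == 0
    return float(np.sum(joint[nonzero]
                        * np.log(joint[nonzero] / outer[nonzero])))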
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper bound of 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings) have
        an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
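# --- illustrative sketch (not part of the original module) -------------------
# Hedged demo of why the chance adjustment matters: two independent random
# labelings typically share a small but positive raw MI, while the AMI stays
# close to 0. The helper name and the default numbers are made up for
# illustration only.
def _ami_vs_mi_example(n_samples=200, n_classes=10, seed=0):
    rng = np.random.RandomState(seed)
    labels_a = rng.randint(n_classes, size=n_samples)
    labels_b = rng.randint(n_classes, size=n_samples)
    # Returns (raw MI, AMI); expect the first to be > 0 and the second ~ 0.
    return (mutual_info_score(labels_a, labels_b),
            adjusted_mutual_info_score(labels_a, labels_b))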
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
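# --- illustrative sketch (not part of the original module) -------------------
# Hedged re-derivation of the normalization used above: NMI is the raw MI
# divided by sqrt(H(labels_true) * H(labels_pred)). The helper name and the
# example labels are made up for illustration only.
def _nmi_by_hand_example():
    labels_a = [0, 0, 1, 1]
    labels_b = [0, 0, 1, 2]
    mi = mutual_info_score(labels_a, labels_b)
    denominator = max(np.sqrt(entropy(labels_a) * entropy(labels_b)), 1e-10)
    # Should agree with normalized_mutual_info_score(labels_a, labels_b).
    return mi / denominator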
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
homeslike/OpticalTweezer | scripts/p0.4_at0.1/vCOMhistogram.py | 28 | 2448 | import math
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from subprocess import call
from scipy.stats import norm
# proc = call("ls *.dat",shell=True)
# datetime = "170123_2033_"
datetime = sys.argv[1]+"_"
gasTempDataIn = np.genfromtxt(datetime+"gasTempData.dat",usecols=0,skip_header=100)
gasTempDataOut = np.genfromtxt(datetime+"gasTempData.dat",usecols=1,skip_header=100)
vCOMData_x = np.genfromtxt(datetime+"vCOMData.dat",usecols=0,skip_header=100)
vCOMData_y = np.genfromtxt(datetime+"vCOMData.dat",usecols=1,skip_header=100)
vCOMData_z = np.genfromtxt(datetime+"vCOMData.dat",usecols=2,skip_header=100)
N = 32
vSqd = []
for i in range(0,len(vCOMData_x)):
	vSqd.append((vCOMData_x[i]*vCOMData_x[i]+vCOMData_y[i]*vCOMData_y[i]+vCOMData_z[i]*vCOMData_z[i])*0.5)
vSqdMean = np.mean(vSqd)
histogram_x,bins_x = np.histogram(vCOMData_x,bins=100,normed=True)
histogram_y,bins_y = np.histogram(vCOMData_y,bins=100,normed=True)
histogram_z,bins_z = np.histogram(vCOMData_z,bins=100,normed=True)
inTemp = np.mean(gasTempDataIn)
outTemp = np.mean(gasTempDataOut)
statistics = open(datetime+"statistics.dat","w")
statistics.write("GasIn: " + str(inTemp)+"\n")
statistics.write("GasOut: " + str(outTemp)+"\n")
statistics.write("T_COM: " + str(2./3. * vSqdMean)+"\n")
statistics.write("Mu_x " + str(np.mean(vCOMData_x))+"\n")
statistics.write("Sigma_x: " + str(np.std(vCOMData_x))+"\n")
statistics.write("Mu_y " + str(np.mean(vCOMData_y))+"\n")
statistics.write("Sigma_y: " + str(np.std(vCOMData_y))+"\n")
statistics.write("Mu_z " + str(np.mean(vCOMData_z))+"\n")
statistics.write("Sigma_z: " + str(np.std(vCOMData_z))+"\n")
histogram_x_file = open(datetime+"histogram_vx.dat","w")
histogram_y_file = open(datetime+"histogram_vy.dat","w")
histogram_z_file = open(datetime+"histogram_vz.dat","w")
for i in range(0,len(histogram_x)):
histogram_x_file.write(str(bins_x[i]) + "\t" + str(histogram_x[i]) + "\n")
histogram_y_file.write(str(bins_y[i]) + "\t" + str(histogram_y[i]) + "\n")
histogram_z_file.write(str(bins_z[i]) + "\t" + str(histogram_z[i]) + "\n")
# plt.figure(1)
# plt.hist(vCOMData_x,bins=100)
# plt.figure(2)
# plt.hist(vCOMData_y,bins=100)
# plt.figure(3)
# plt.hist(vCOMData_z,bins=100)
# plt.show()
# plt.figure(1)
# plt.plot(vSqd)
# plt.plot((0,700),(vSqdMean,vSqdMean))
# plt.figure(2)
# plt.hist(vCOMData_x,bins=100,normed=True)
# plt.plot(x,gasInPDF)
# plt.show()
| mit |
dgwakeman/mne-python | examples/plot_compute_mne_inverse.py | 21 | 1885 | """
================================================
Compute MNE-dSPM inverse solution on evoked data
================================================
Compute dSPM inverse solution on MNE evoked dataset
and stores the solution in stc files for visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=None)
# Save result in stc files
stc.save('mne_%s_inverse' % method)
###############################################################################
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# use peak getter to move vizualization to the time point of the peak
vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
brain.set_data_time_index(time_idx)
# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6)
brain.save_image('dSPM_map.png')
| bsd-3-clause |
silverfield/pythonsessions | s10_complexity/complexity.py | 1 | 7757 | __author__ = 'ferrard'
# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
import time
import matplotlib.pyplot as plt
import numpy as np
import random
# ---------------------------------------------------------------
# Interface - Timing function
# ---------------------------------------------------------------
def time_them(k, m, *functions):
"""Times the functions (accepting one argument - n) on k values of n up to m
Stops the timing once the function's execution takes:
- more then 2 sec
- more then 1 sec longer then on previous value of n
"""
n_values = list(range(1, m))
if m > k:
n_values = list(range(1, m, m//k))
results = []
for i in range(len(functions)):
results_for_f = []
for n in n_values:
before = time.time()
functions[i](n)
after = time.time()
results_for_f.append(after - before)
if results_for_f[-1] > 2 or (len(results_for_f) > 1 and results_for_f[-1] - results_for_f[-2] > 1):
break
results.append(results_for_f)
for i in range(len(functions)):
plt.plot(n_values[:len(results[i])], results[i], label=functions[i].__name__)
plt.legend()
plt.show()
# ---------------------------------------------------------------
# Interface - try out
# ---------------------------------------------------------------
def n_sqrt_n(n):
res = 0
    for i in range(n*int(np.sqrt(n))):
res += 1
return res
def n_squared(n):
res = 0
for i in range(n*n):
res += 1
return res
# ---------------------------------------------------------------
# Interface - Sum to
# ---------------------------------------------------------------
def sum_builtin(n):
"""Sums numbers up to n using built-in function - O(n)"""
print(sum(range(n)))
def sum_explicit(n):
"""Sums numbers up to n explicitely - O(n)"""
total = 0
for i in range(n):
total += i
print(total)
def sum_analytic(n):
"""Sums numbers up to n, analytically - O(1)"""
print(n*(n + 1)//2)
# ---------------------------------------------------------------
# Interface - Sum of squares to
# ---------------------------------------------------------------
def sum_sq_builtin(n):
"""Sums squares of numbers up to n using built-in function - O(n)"""
print(sum(i*i for i in range(n)))
def sum_sq_explicit(n):
"""Sums squares of numbers up to n explicitely - O(n)"""
total = 0
for i in range(n):
total += i*i
print(total)
def sum_sq_analytic(n):
"""Sums squares of numbers up to n, analytically - O(1)"""
print(n*(n + 1)*(2*n + 1)//6)
# ---------------------------------------------------------------
# Sorting
# ---------------------------------------------------------------
MAX_SORT_NUMBER = 1000
def check_is_sorted(l):
n = len(l)
print(str(n) + " - Sorted OK" if all(l[i] <= l[i + 1] for i in range(n - 1)) else "Sorted NOK")
def sort_quadratic(n):
"""Sort n random numbers - using inefficient quadratic sort - O(n^2)"""
l = list(np.random.random_integers(0, MAX_SORT_NUMBER, n))
for i in range(n):
for j in range(i + 1, n):
if l[i] > l[j]:
tmp = l[i]
l[i] = l[j]
l[j] = tmp
check_is_sorted(l)
def sort_inbuilt(n):
"""Sorts n random numbers - using efficient inbuilt function - O(n log n)"""
l = list(np.random.random_integers(0, MAX_SORT_NUMBER, n))
l.sort()
check_is_sorted(l)
def sort_counting(n):
"""Sorts n random numbers bounded in a small range - using efficient linear sort - O(n)"""
l = list(np.random.random_integers(0, MAX_SORT_NUMBER, n))
counts = [0]*(MAX_SORT_NUMBER + 1)
for i in l:
counts[i] += 1
counter = 0
for i in range(len(counts)):
for j in range(counts[i]):
l[counter] = i
counter += 1
check_is_sorted(l)
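# --- illustrative sketch (not in the original script) -----------------------
# A tiny standalone version of the counting-sort idea used above, returning
# the sorted list instead of sorting in place. The helper name is made up.
def counting_sort_example(values=(3, 1, 2, 1, 0, 3), max_value=3):
    counts = [0] * (max_value + 1)
    for v in values:                          # O(n): tally each value
        counts[v] += 1
    result = []
    for value, count in enumerate(counts):    # O(max_value): replay the tallies
        result.extend([value] * count)
    return result                             # [0, 1, 1, 2, 3, 3]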
# ---------------------------------------------------------------
# Fibonnachi numbers
# ---------------------------------------------------------------
def fib_n_naive(n):
"""Naive (recursive) way to compute Fibonacci's numbers. O(F(n))"""
if n == 0:
return 0
if n == 1:
return 1
return fib_n_naive(n - 1) + fib_n_naive(n - 2)
def fib_n(n):
"""Efficient way to compute Fibonacci's numbers. Complexity = O(n)"""
fibs = [0, 1] # we don't need to store all along the way, but the memory is still as good as in the naive alg
for i in range(2, n + 1):
fibs.append(fibs[-2] + fibs[-1])
print(fibs[-1])
return fibs[-1]
def fib_n_closed(n):
"""Closed-form computation Fibonacci's numbers. Complexity = O(n)
WRONG! Problems with precision!
"""
fi = (1 + np.sqrt(5))/2
res = int(np.round((fi**n - (-fi)**(-n))/np.sqrt(5)))
print(res)
return res
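# --- illustrative sketch (not in the original script) -----------------------
# Hedged check of the precision warning above: compare the closed form against
# an exact integer recurrence and report the first n where they disagree
# (None if they agree up to `limit`). Note that fib_n_closed prints each value
# as a side effect. The helper name is made up.
def first_closed_form_failure(limit=100):
    a, b = 0, 1                               # exact F(0), F(1)
    for n in range(limit):
        if fib_n_closed(n) != a:
            return n
        a, b = b, a + b
    return None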
# ---------------------------------------------------------------
# Primality tests
# ---------------------------------------------------------------
def check_hundred_primes_naive(n):
"""Checks 100 random numbers with n digits - if they are prime, using the naive alg. O(2^(sqrt(n))"""
for i in range(100):
number = random.randint(10**n, 10**(n + 1))
primality_test_naive(number)
def check_hundred_primes_miller_rabin(n):
"""Checks 100 random numbers with n digits - if they are prime, using Miller-Rabin test. O(n)"""
for i in range(100):
number = random.randint(10**n, 10**(n + 1))
primality_test_miller_rabin(number)
def primality_test_naive(n):
"""Does primality test for n in a naive way. Complexity O(sqrt(n))"""
print("Checking " + str(n))
if n % 2 == 0:
n += 1
for i in range(2, n):
if i*i > n:
break
if n % i == 0:
print(str(n) + " is composite")
return False
print(str(n) + " is prime")
return True
def modular_exp(a, b, n):
"""Computes a^b mod n. Complexity O(log(b))"""
res = 1
q = a
while b > 0:
if b % 2 == 1:
res = q*res % n
q = q*q % n
b //= 2
return res
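# --- illustrative sketch (not in the original script) -----------------------
# Hedged sanity check of modular_exp (square-and-multiply) against Python's
# built-in three-argument pow(). The helper name and ranges are made up.
def check_modular_exp(trials=100, seed=1234):
    rnd = random.Random(seed)
    for _ in range(trials):
        a = rnd.randint(2, 10**6)
        b = rnd.randint(0, 10**6)
        n = rnd.randint(2, 10**6)
        assert modular_exp(a, b, n) == pow(a, b, n)
    return True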
def primality_test_miller_rabin(n):
"""Miller-Rabin primality test. Complexity O(log(n))"""
print("Checking " + str(n))
if n % 2 == 0:
n += 1
# write m as t*2^s
s = 0
x = 2
while (n - 1) % x == 0:
s += 1
x *= 2
s -= 1
x //= 2
t = (n - 1)//x
# do k iterations, looking for witnesses
k = 10
for i in range(k):
b = random.randint(2, n - 2)
l = modular_exp(b, t, n)
if l in [1, n - 1]:
continue
        for j in range(1, s + 1):  # n - 1 == t * 2**(s + 1), so up to s squarings are needed
l = l*l % n
if l == n - 1:
break
if l != n - 1: # found witness
print(str(n) + " is composite")
return False
print(str(n) + " is prime with probability > " + str(1 - (1/4)**k))
return True
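# --- illustrative sketch (not in the original script) -----------------------
# Hedged cross-check of the two primality tests on small odd numbers (both
# tests above expect odd input, and the Miller-Rabin base range needs n >= 5).
# Returns the list of disagreements, which should be empty; note that both
# tests print as a side effect, so this is chatty. The helper name is made up.
def cross_check_primality(limit=500):
    mismatches = []
    for n in range(5, limit, 2):
        if primality_test_naive(n) != primality_test_miller_rabin(n):
            mismatches.append(n)
    return mismatches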
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
def main():
print(modular_exp(111, 98, 197))
exit()
time_them(20, 1000000, sum_builtin, sum_explicit, sum_analytic)
time_them(20, 100000, sum_sq_builtin, sum_sq_explicit, sum_sq_analytic)
time_them(25, 50, fib_n, fib_n_naive)
time_them(25, 500, fib_n, fib_n_closed)
time_them(10, 10000, sort_counting, sort_inbuilt, sort_quadratic)
time_them(10, 1000000, sort_counting, sort_inbuilt)
time_them(30, 80, check_hundred_primes_naive, check_hundred_primes_miller_rabin)
if __name__ == '__main__':
main()
| mit |
equialgo/scikit-learn | sklearn/utils/random.py | 46 | 10523 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
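# --- illustrative sketch (not part of the original module) ------------------
# Hedged usage example for random_choice_csc: two output columns, each drawing
# integer class labels from its own distribution (0 acts as the implicit
# sparse value). The class lists and probabilities below are made up for
# illustration only.
def _random_choice_csc_example():
    classes = [np.array([0, 1, 2]), np.array([0, 3])]
    class_probability = [np.array([0.5, 0.25, 0.25]), np.array([0.75, 0.25])]
    sampled = random_choice_csc(n_samples=10, classes=classes,
                                class_probability=class_probability,
                                random_state=0)
    return sampled.toarray()    # dense view of the (10, 2) sparse draw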
| bsd-3-clause |
jseabold/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
yuraic/koza4ok | test/run1/draw_roc_sklearn_vs_skTMVA_electrons.py | 1 | 1458 | import ROOT
from ROOT import TGraphErrors
from array import array
from mva_tools.build_roc_simple import build_roc
if __name__ == "__main__":
path = "/Users/musthero/Documents/Yura/Applications/tmva_local/BDT_score_distributions_electrons.root"
hsig_skTMVA_path = "histo_tmva_sig"
hbkg_skTMVA_path = "histo_tmva_bkg"
hsig_sklearn_path = "histo_sk_sig"
hbkg_sklearn_path = "histo_sk_bkg"
rootfile = ROOT.TFile.Open(path)
if rootfile.IsZombie():
print "Root file is corrupt"
hSig_skTMVA = rootfile.Get(hsig_skTMVA_path)
hBkg_skTMVA = rootfile.Get(hbkg_skTMVA_path)
hSig_sklearn = rootfile.Get(hsig_sklearn_path)
hBkg_sklearn = rootfile.Get(hbkg_sklearn_path)
# Stack for keeping plots
plots = []
# Getting ROC-curve for skTMVA
g1 = build_roc(hSig_skTMVA, hBkg_skTMVA)
g1.SetName("g1")
g1.SetTitle("ROC curve [electrons]")
plots.append(g1)
g1.SetLineColor(ROOT.kBlue)
g1.Draw("AL") # draw TGraph with no marker dots
# Getting ROC-curve for sklearn
g2 = build_roc(hSig_sklearn, hBkg_sklearn)
g2.SetName("g2")
g2.SetTitle("ROC curve [electrons]")
plots.append(g2)
g2.SetLineStyle(7)
g2.SetLineColor(ROOT.kRed)
g2.Draw("SAME") # draw TGraph with no marker dots
    leg = ROOT.TLegend(0.1, 0.5, 0.3, 0.4)
#leg.SetHeader("ROC curve")
leg.AddEntry("g1","skTMVA","l")
leg.AddEntry("g2","sklearn","l")
leg.Draw()
| mit |
jkthompson/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/quiver.py | 69 | 36790 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
import matplotlib.font_manager as font_manager
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the arrow vectors
*C*:
an optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: ['width' | 'height' | 'dots' | 'inches' | 'x' | 'y' ]
arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x' or 'y': *X* or *Y* data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
'width or 'height', the arrow size increases with the width and
height of the axes, respectively, when the the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: ['uv' | 'xy' | array]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ None | float ]
data units per arrow unit, e.g. m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors.
*width*:
shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
head width as multiple of shaft width, default is 3
*headlength*: scalar
head length as multiple of shaft width, default is 5
*headaxislength*: scalar
head length at shaft intersection, default is 4.5
*minshaft*: scalar
length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % martist.kwdocd
_quiverkey_doc = """
Add a key to a quiver plot.
call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
a string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key.
"""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: ##not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(self.verts,
offsets=[(self.X,self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
def __init__(self, ax, *args, **kw):
self.ax = ax
X, Y, U, V, C = self._parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:,np.newaxis], Y[:,np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=ax.transData,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _quiver_doc
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _init(self):
"""initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: ##not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
sn = max(8, min(25, math.sqrt(self.N)))
if self.width is None:
self.width = 0.06 * self.span / sn
def draw(self, renderer):
self._init()
if self._new_UV or self.angles == 'xy':
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
self.U = U.ravel()
self.V = V.ravel()
if C is not None:
self.set_array(C.ravel())
self._new_UV = True
def _set_transform(self):
ax = self.ax
if self.units in ('x', 'y'):
if self.units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
dx = dx1/dx0
else:
if self.units == 'width':
dx = ax.bbox.width
elif self.units == 'height':
dx = ax.bbox.height
elif self.units == 'dots':
dx = 1.0
elif self.units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles(self, U, V, eps=0.001):
xy = self.ax.transData.transform(self.XY)
uv = ma.hstack((U[:,np.newaxis], V[:,np.newaxis])).filled(0)
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
ang = ma.arctan2(dxy[:,1], dxy[:,0])
return ang
def _make_verts(self, U, V):
uv = ma.asarray(U+V*1j)
a = ma.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
self.scale = scale
length = a/(self.scale*self.width)
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = self._angles(U, V).filled(0)[:,np.newaxis]
elif self.angles == 'uv':
theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
else:
theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
xy = (X+Y*1j) * np.exp(1j*theta)*self.width
xy = xy[:,:,np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0], np.float64)
x = x + np.array([0,1,1,1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis,:], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh-self.headaxislength,
minsh-self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0,1,2,3,2,1,0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:] *= -1
shrink = length/minsh
X0 = shrink * X0[np.newaxis,:]
Y0 = shrink * Y0[np.newaxis,:]
short = np.repeat(length < minsh, 7, axis=1)
#print 'short', length < minsh
# Now select X0, Y0 if short, otherwise X, Y
X = ma.where(short, X0, X)
Y = ma.where(short, Y0, Y)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:,3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:,3, np.newaxis] #numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0,7,1, np.float64) * (np.pi/3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = ma.repeat(tooshort, 7, 1)
X = ma.where(tooshort, X1, X)
Y = ma.where(tooshort, Y1, Y)
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the barb shaft
*C*:
an optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
        Specifies the color of all parts of the barb except any flags. This
        parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
        analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
    lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
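Example (a minimal usage sketch; the grid, wind components, and
keyword values below are illustrative only)::

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-5, 5, 5)
X, Y = np.meshgrid(x, x)
U, V = 15 * X, 15 * Y            # synthetic wind components
plt.barbs(X, Y, U, V, length=7, pivot='middle')
plt.show()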
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % martist.kwdocd
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
def __init__(self, ax, *args, **kw):
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
#Flagcolor and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = self._parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
#Make a collection
barb_size = self._length**2 / 4 #Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=ax.transData, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _barbs_doc
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. *flag*
specifies the increment for a flag, *full* for a full barb, and *half*
for half a barb. *mag* should be the magnitude of a vector (i.e., >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean indicating whether half of a barb is needed,
since there should only ever be one half on a given
barb. *empty_flag* is an array of flags to easily tell if
a barb is empty (too low to plot any barbs/flags).
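Example (a rough sketch; exact array dtypes and formatting may differ)::

# With the default increments, a magnitude of 65 decomposes into
# 1 flag (50) + 1 full barb (10) + 1 half barb (5), while a
# magnitude of 2 rounds down to an empty barb:
nflags, nbarbs, half, empty = self._find_tails(np.array([65.0, 2.0]))
# nflags -> [1, 0], nbarbs -> [1, 0], half -> [True, False],
# empty -> [False, True]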
'''
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
*nflags*, *nbarbs*, *half_barb*, and *empty_flag* are,
respectively, the number of flags, number of barbs, flag for
half a barb, and flag for empty barb, ostensibly obtained
from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
'tip' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
- *height*: distance from shaft to top of a flag or full
barb
- *width* - width of a flag, twice the width of a full barb
- *emptybarb* - radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
hemisphere).
This function returns a list of arrays of vertices, defining a polygon
for each of the wind barbs. These polygons have been rotated to
properly align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length/2.)
#Check for flip
if flip: full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
#Get the appropriate angle for the vector components. The offset is due
#to the way the barb is initially drawn, going down the y-axis. This
#makes sense in a meteorological mode of thinking, where 0 degrees
#corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi/2)
#Used for low magnitude. We just get the vertices, so if we make it
#out here, it can be reused. The center set here should put the
#center of the circle at the location (offset), rather than at the
#same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
#If we don't want the empty one filled, we make a degenerate polygon
#that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
#Add vertices for each flag
for i in range(nflags[index]):
#The spacing that works for the barbs is a little too much for
#the flags, but this only occurs when we have more than 1 flag.
if offset != length: offset += spacing / 2.
poly_verts.extend([[endx, endy + offset],
[endx + full_height, endy - full_width/2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
#Add vertices for each barb. These really are lines, but works
#great adding 3 vertices that basically pull the polygon out and
#back down the line
for i in range(nbarbs[index]):
poly_verts.extend([(endx, endy + offset),
(endx + full_height, endy + offset + full_width/2),
(endx, endy + offset)])
offset -= spacing
#Add the vertices for half a barb, if needed
if half_barb[index]:
#If the half barb is the first on the staff, traditionally it is
#offset from the end to make it easy to distinguish from a barb
#with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend([(endx, endy + offset),
(endx + full_height/2, endy + offset + full_width/4),
(endx, endy + offset)])
#Rotate the barb according to the angle. Making the barb first and then
#rotating it made the math for drawing the barb really easy. Also,
#the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
#Taken shamelessly from Quiver
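#Supported call signatures (mirroring Quiver): (U, V), (U, V, C),
#(X, Y, U, V) and (X, Y, U, V, C), where C is an optional array used
#to color-map the barbs.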
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def set_UVC(self, U, V, C=None):
self.u = ma.asarray(U).ravel()
self.v = ma.asarray(V).ravel()
if C is not None:
c = ma.asarray(C).ravel()
x,y,u,v,c = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v, c)
else:
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.sqrt(u*u + v*v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding, **self.barb_increments)
#Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes, self.fill_empty, self.flip)
self.set_verts(plot_barbs)
#Set the color array
if C is not None:
self.set_array(c)
#Update the offsets in case the masked data changed
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
'''
Set the offsets for the barb polygons. This saves the offsets passed
in and actually sets the masked version as appropriate for the
existing U/V data. *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
'''
self.x = xy[:,0]
self.y = xy[:,1]
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,
self.v)
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| gpl-3.0 |
mhdella/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
pcubillos/MCcubed | MCcubed/plots/mcplots.py | 1 | 20081 | # Copyright (c) 2015-2019 Patricio Cubillos and contributors.
# MC3 is open-source software under the MIT license (see LICENSE).
__all__ = ["trace", "pairwise", "histogram", "RMS", "modelfit", "subplotter"]
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.interpolate as si
from . import colormaps as cm
from .. import utils as mu
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../lib')
import binarray as ba
if sys.version_info.major == 2:
range = xrange
def trace(posterior, Zchain=None, pnames=None, thinning=1,
burnin=0, fignum=100, savefile=None, fmt=".", ms=2.5, fs=11):
"""
Plot parameter trace MCMC sampling.
Parameters
----------
posterior: 2D float ndarray
An MCMC posterior sampling with dimension: [nsamples, npars].
Zchain: 1D integer ndarray
the chain index for each posterior sample.
pnames: Iterable (strings)
Label names for parameters.
thinning: Integer
Thinning factor for plotting (plot every thinning-th value).
burnin: Integer
Thinned burn-in number of iterations (only used when Zchain is not None).
fignum: Integer
The figure number.
savefile: String
If not None, name of file to save the plot.
fmt: String
The format string for the line and marker.
ms: Float
Marker size.
fs: Float
Fontsize of texts.
Returns
-------
axes: 1D axes ndarray
The array of axes containing the marginal posterior distributions.
Uncredited Developers
---------------------
Kevin Stevenson (UCF)
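Example (a minimal sketch; assumes the plotting routines are exposed
through the MCcubed.plots namespace)::

>>> import numpy as np
>>> import MCcubed.plots as mp
>>> posterior = np.random.normal(0.0, 1.0, (3000, 3))
>>> axes = mp.trace(posterior, pnames=["a", "b", "c"])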
"""
# Get indices for samples considered in final analysis:
if Zchain is not None:
nchains = np.amax(Zchain) + 1
good = np.zeros(len(Zchain), bool)
for c in range(nchains):
good[np.where(Zchain == c)[0][burnin:]] = True
# Values accepted for posterior stats:
posterior = posterior[good]
Zchain = Zchain [good]
# Sort the posterior by chain:
zsort = np.lexsort([Zchain])
posterior = posterior[zsort]
Zchain = Zchain [zsort]
# Get location for chains separations:
xsep = np.where(np.ediff1d(Zchain[0::thinning]))[0]
# Get number of parameters and length of chain:
nsamples, npars = np.shape(posterior)
# Number of samples (thinned):
xmax = len(posterior[0::thinning])
# Set default parameter names:
if pnames is None:
pnames = mu.default_parnames(npars)
npanels = 12 # Max number of panels per page
npages = int(1 + (npars-1)/npanels)
# Make the trace plot:
axes = []
i = 0
for page in range(npages):
fig = plt.figure(page, figsize=(8.5,11.0))
plt.clf()
plt.subplots_adjust(left=0.15, right=0.95, bottom=0.05, top=0.97,
hspace=0.15)
while i < npars:
ax = plt.subplot(npanels, 1, i%npanels+1)
axes.append(ax)
ax.plot(posterior[0::thinning,i], fmt, ms=ms)
yran = ax.get_ylim()
if Zchain is not None:
ax.vlines(xsep, yran[0], yran[1], "0.5")
# Y-axis adjustments:
ax.set_ylim(yran)
ax.locator_params(axis='y', nbins=5, tight=True)
ax.tick_params(labelsize=fs-1)
ax.set_ylabel(pnames[i], size=fs, multialignment='center')
# X-axis adjustments:
ax.set_xlim(0, xmax)
ax.get_xaxis().set_visible(False)
i += 1
if i%npanels == 0:
break
ax.set_xlabel('MCMC sample', size=fs)
ax.get_xaxis().set_visible(True)
if savefile is not None:
if npages > 1:
sf = os.path.splitext(savefile)
try:
bbox = fig.get_tightbbox(fig._cachedRenderer).padded(0.1)
bbox_points = bbox.get_points()
bbox_points[:,0] = 0.0, 8.5
bbox.set_points(bbox_points)
except: # May fail for ssh connection without X display
ylow = 9.479 - 0.862*np.amin([npanels-1, npars-npanels*page-1])
bbox = mpl.transforms.Bbox([[0.0, ylow], [8.5, 11]])
fig.savefig("{:s}_page{:02d}{:s}".format(sf[0], page, sf[1]),
bbox_inches=bbox)
else:
fig.savefig(savefile, bbox_inches='tight')
return axes
def pairwise(posterior, pnames=None, thinning=1, fignum=200,
savefile=None, bestp=None, nbins=35, nlevels=20,
absolute_dens=False, ranges=None, fs=11, rect=None, margin=0.01):
"""
Plot parameter pairwise posterior distributions.
Parameters
----------
posterior: 2D ndarray
An MCMC posterior sampling with dimension: [nsamples, nparameters].
pnames: Iterable (strings)
Label names for parameters.
thinning: Integer
Thinning factor for plotting (plot every thinning-th value).
fignum: Integer
The figure number.
savefile: String
If not None, name of file to save the plot.
bestp: 1D float ndarray
If not None, plot the best-fitting values for each parameter
given by bestp.
nbins: Integer
The number of grid bins for the 2D histograms.
nlevels: Integer
The number of contour color levels.
ranges: List of 2-element arrays
List with custom (lower,upper) x-ranges for each parameter.
Leave None for default, e.g., ranges=[(1.0,2.0), None, (0, 1000)].
fs: Float
Fontsize of texts.
rect: 1D list/ndarray
If not None, plot the pairwise plots in current figure, within the
ranges defined by rect (xleft, ybottom, xright, ytop).
margin: Float
Margins between panels (when rect is not None).
Returns
-------
axes: 2D axes ndarray
The grid of axes containing the pairwise posterior distributions.
cb: axes
The colorbar axes instance.
Notes
-----
Note that rect delimits the boundaries of the panels. The labels and
ticklabels will appear right outside rect, so the user needs to leave
some wiggle room for them.
Uncredited Developers
---------------------
Kevin Stevenson (UCF)
Ryan Hardy (UCF)
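Example (a minimal sketch; posterior is a [nsamples, npars] array and
mp is the MCcubed.plots module, both assumed available)::

>>> axes, cb = mp.pairwise(posterior, pnames=["a", "b", "c"])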
"""
# Get number of parameters and length of chain:
nsamples, npars = np.shape(posterior)
# Don't plot if there are no pairs:
if npars == 1:
return
if ranges is None:
ranges = np.repeat(None, npars)
else: # Set default ranges if necessary:
for i in range(npars):
if ranges[i] is None:
ranges[i] = (np.nanmin(posterior[0::thinning,i]),
np.nanmax(posterior[0::thinning,i]))
# Set default parameter names:
if pnames is None:
pnames = mu.default_parnames(npars)
# Set palette color:
palette = cm.viridis_r
palette.set_under(color='w')
palette.set_bad(color='w')
# Gather 2D histograms:
hist = []
xran, yran, lmax = [], [], []
for j in range(1, npars): # Rows
for i in range(j): # Columns
ran = None
if ranges[i] is not None:
ran = [ranges[i], ranges[j]]
h,x,y = np.histogram2d(posterior[0::thinning,i],
posterior[0::thinning,j], bins=nbins, range=ran, normed=False)
hist.append(h.T)
xran.append(x)
yran.append(y)
lmax.append(np.amax(h)+1)
# Reset upper boundary to absolute maximum value if requested:
if absolute_dens:
lmax = npars*(npars+1)*2 * [np.amax(lmax)]
if rect is None:
rect = (0.15, 0.15, 0.95, 0.95)
plt.figure(fignum, figsize=(8,8))
plt.clf()
axes = np.tile(None, (npars-1, npars-1))
# Plot:
k = 0 # Histogram index
for j in range(1, npars): # Rows
for i in range(j): # Columns
h = (npars-1)*(j-1) + i + 1 # Subplot index
ax = axes[i,j-1] = subplotter(rect, margin, h, npars-1)
# Labels:
ax.tick_params(labelsize=fs-1)
if i == 0:
ax.set_ylabel(pnames[j], size=fs)
else:
ax.get_yaxis().set_visible(False)
if j == npars-1:
ax.set_xlabel(pnames[i], size=fs)
plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)
else:
ax.get_xaxis().set_visible(False)
# The plot:
a = ax.contourf(hist[k], cmap=palette, vmin=1, origin='lower',
levels=[0]+list(np.linspace(1,lmax[k], nlevels)),
extent=(xran[k][0], xran[k][-1], yran[k][0], yran[k][-1]))
for c in a.collections:
c.set_edgecolor("face")
if bestp is not None:
ax.axvline(bestp[i], dashes=(6,4), color="0.5", lw=1.0)
ax.axhline(bestp[j], dashes=(6,4), color="0.5", lw=1.0)
if ranges[i] is not None:
ax.set_xlim(ranges[i])
if ranges[i] is not None:
ax.set_ylim(ranges[j])
k += 1
# The colorbar:
bounds = np.linspace(0, 1.0, nlevels)
norm = mpl.colors.BoundaryNorm(bounds, palette.N)
if rect is not None:
dx = (rect[2]-rect[0])*0.05
dy = (rect[3]-rect[1])*0.45
ax2 = plt.axes([rect[2]-dx, rect[3]-dy, dx, dy])
else:
ax2 = plt.axes([0.85, 0.57, 0.025, 0.36])
cb = mpl.colorbar.ColorbarBase(ax2, cmap=palette, norm=norm,
spacing='proportional', boundaries=bounds, format='%.1f')
cb.set_label("Posterior density", fontsize=fs)
cb.ax.yaxis.set_ticks_position('left')
cb.ax.yaxis.set_label_position('left')
cb.ax.tick_params(labelsize=fs-1)
cb.set_ticks(np.linspace(0, 1, 5))
for c in ax2.collections:
c.set_edgecolor("face")
plt.draw()
# Save file:
if savefile is not None:
plt.savefig(savefile)
return axes, cb
def histogram(posterior, pnames=None, thinning=1, fignum=300,
savefile=None, bestp=None, percentile=None, pdf=None,
xpdf=None, ranges=None, axes=None, lw=2.0, fs=11):
"""
Plot parameter marginal posterior distributions
Parameters
----------
posterior: 1D or 2D float ndarray
An MCMC posterior sampling with dimension [nsamples] or
[nsamples, nparameters].
pnames: Iterable (strings)
Label names for parameters.
thinning: Integer
Thinning factor for plotting (plot every thinning-th value).
fignum: Integer
The figure number.
savefile: String
If not None, name of file to save the plot.
bestp: 1D float ndarray
If not None, plot the best-fitting values for each parameter
given by bestp.
percentile: Float
If not None, plot the highest posterior density (HPD) region that
contains this fraction of the distribution. Note that this is a
fraction, not a percentage; e.g., set percentile=0.68 for a 68% HPD.
pdf: 1D float ndarray or list of ndarrays
A smoothed PDF of the distribution for each parameter.
xpdf: 1D float ndarray or list of ndarrays
The X coordinates of the PDFs.
ranges: List of 2-element arrays
List with custom (lower,upper) x-ranges for each parameter.
Leave None for default, e.g., ranges=[(1.0,2.0), None, (0, 1000)].
axes: List of matplotlib.axes
If not None, plot histograms in the currently existing axes.
lw: Float
Linewidth of the histogram contour.
fs: Float
Font size for texts.
Returns
-------
axes: 1D axes ndarray
The array of axes containing the marginal posterior distributions.
Uncredited Developers
---------------------
Kevin Stevenson (UCF)
"""
if np.ndim(posterior) == 1:
posterior = np.expand_dims(posterior, axis=1)
nsamples, npars = np.shape(posterior)
if pdf is None: # Make list of Nones
pdf = [None]*npars
xpdf = [None]*npars
if not isinstance(pdf, list): # Put single arrays into list
pdf = [pdf]
xpdf = [xpdf]
# Histogram keywords depending whether one wants the HPD or not:
hkw = {'edgecolor':'navy', 'color':'b'}
# Bestfit keywords:
bkw = {'zorder':2, 'color':'orange'}
if percentile is not None:
hkw = {'histtype':'step', 'lw':lw, 'edgecolor':'b'}
bkw = {'zorder':-1, 'color':'red'}
# Set default parameter names:
if pnames is None:
pnames = mu.default_parnames(npars)
# Xranges:
if ranges is None:
ranges = np.repeat(None, npars)
# Set number of rows:
nrows, ncolumns, npanels = 4, 3, 12
npages = int(1 + (npars-1)/npanels)
if axes is None:
newfig = True
axes = []
else:
newfig = False
npages = 1 # Assume there's only one page
figs = np.tile(None, npages)
maxylim = 0 # Max Y limit
i = 0
for j in range(npages):
if newfig:
figs[j] = plt.figure(fignum+j, figsize=(8.5, 11.0))
plt.clf()
plt.subplots_adjust(left=0.1, right=0.97, bottom=0.08, top=0.98,
hspace=0.5, wspace=0.1)
else:
figs[j] = axes[0].get_figure()
while i < npars:
if newfig:
ax = plt.subplot(nrows, ncolumns, i%npanels+1)
axes.append(ax)
if i%ncolumns == 0:
ax.set_ylabel(r"$N$ samples", fontsize=fs)
else:
ax.get_yaxis().set_visible(False)
else:
ax = axes[i]
ax.get_yaxis().set_visible(False) # No ylabel/yticklabels by default
ax.tick_params(labelsize=fs-1)
plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)
ax.set_xlabel(pnames[i], size=fs)
vals, bins, h = ax.hist(posterior[0::thinning, i], bins=25,
range=ranges[i], normed=False, zorder=0, **hkw)
# Plot HPD region:
if percentile is not None:
PDF, Xpdf, HPDmin = mu.credregion(posterior[:,i], percentile,
pdf[i], xpdf[i])
vals = np.r_[0, vals, 0]
bins = np.r_[bins[0] - (bins[1]-bins[0]), bins]
# interpolate xpdf into the histogram:
f = si.interp1d(bins+0.5*(bins[1]-bins[0]), vals, kind='nearest')
# Plot the HPD region as shaded areas:
if ranges[i] is not None:
xran = np.argwhere((Xpdf>ranges[i][0]) & (Xpdf<ranges[i][1]))
Xpdf = Xpdf[np.amin(xran):np.amax(xran)]
PDF = PDF [np.amin(xran):np.amax(xran)]
ax.fill_between(Xpdf, 0, f(Xpdf), where=PDF>=HPDmin,
facecolor='0.75', edgecolor='none', interpolate=False, zorder=-2)
if bestp is not None:
ax.axvline(bestp[i], dashes=(7,4), lw=1.0, **bkw)
maxylim = np.amax((maxylim, ax.get_ylim()[1]))
i += 1
if i%npanels == 0:
break
# Set uniform height and save:
for ax in axes:
ax.set_ylim(0, maxylim)
# Save:
if savefile is not None:
for j in range(npages):
if npages > 1:
sf = os.path.splitext(savefile)
figs[j].savefig("{:s}_page{:02d}{:s}".format(sf[0], j+1, sf[1]),
bbox_inches='tight')
else:
figs[j].savefig(savefile, bbox_inches='tight')
return axes
def RMS(binsz, rms, stderr, rmslo, rmshi, cadence=None, binstep=1,
timepoints=[], ratio=False, fignum=-40,
yran=None, xran=None, savefile=None):
"""
Plot the RMS vs binsize curve.
Parameters
----------
binsz: 1D ndarray
Array of bin sizes.
rms: 1D ndarray
RMS of dataset at given binsz.
stderr: 1D ndarray
Gaussian-noise RMS extrapolation.
rmslo: 1D ndarray
RMS lower uncertainty
rmshi: 1D ndarray
RMS upper uncertainty
cadence: Float
Time between datapoints in seconds.
binstep: Integer
Plot every-binstep point.
timepoints: List
Plot a vertical line at each of the given time points.
ratio: Boolean
If True, plot rms/stderr, else, plot both curves.
fignum: Integer
Figure number
yran: 2-elements tuple
Minimum and Maximum y-axis ranges.
xran: 2-elements tuple
Minimum and Maximum x-axis ranges.
savefile: String
If not None, name of file to save the plot.
"""
if np.size(rms) <= 1:
return
# Set cadence:
if cadence is None:
cadence = 1.0
xlabel = "Bin size"
else:
xlabel = "Bin size (sec)"
# Set plotting limits:
if yran is None:
#yran = np.amin(rms), np.amax(rms)
yran = [np.amin(rms-rmslo), np.amax(rms+rmshi)]
yran[0] = np.amin([yran[0],stderr[-1]])
if ratio:
yran = [0, np.amax(rms/stderr) + 1.0]
if xran is None:
xran = [cadence, np.amax(binsz*cadence)]
fs = 14 # Font size
if ratio:
ylabel = r"$\beta$ = RMS / std error"
else:
ylabel = "RMS"
plt.figure(fignum, (8,6))
plt.clf()
ax = plt.subplot(111)
if ratio: # Plot the residuals-to-Gaussian RMS ratio:
ax.errorbar(binsz[::binstep]*cadence, (rms/stderr)[::binstep],
yerr=[(rmslo/stderr)[::binstep], (rmshi/stderr)[::binstep]],
fmt='k-', ecolor='0.5', capsize=0, label="__nolabel__")
ax.semilogx(xran, [1,1], "r-", lw=2)
else: # Plot residuals and Gaussian RMS individually:
# Residuals RMS:
ax.errorbar(binsz[::binstep]*cadence, rms[::binstep],
yerr=[rmslo[::binstep], rmshi[::binstep]],
fmt='k-', ecolor='0.5', capsize=0, label="RMS")
# Gaussian noise projection:
ax.loglog(binsz*cadence, stderr, color='red', ls='-',
lw=2, label="Gaussian std.")
ax.legend(loc="best")
for time in timepoints:
ax.vlines(time, yran[0], yran[1], 'b', 'dashed', lw=2)
ax.tick_params(labelsize=fs-1)
ax.set_ylim(yran)
ax.set_xlim(xran)
ax.set_ylabel(ylabel, fontsize=fs)
ax.set_xlabel(xlabel, fontsize=fs)
if savefile is not None:
plt.savefig(savefile)
def modelfit(data, uncert, indparams, model, nbins=75,
fignum=-50, savefile=None, fmt="."):
"""
Plot the binned dataset with given uncertainties and model curves
as a function of indparams.
In a lower panel, plot the residuals between the data and model.
Parameters
----------
data: 1D float ndarray
Input data set.
uncert: 1D float ndarray
One-sigma uncertainties of the data points.
indparams: 1D float ndarray
Independent variable (X axis) of the data points.
model: 1D float ndarray
Model of data.
nbins: Integer
Number of bins in the output plot.
fignum: Integer
The figure number.
savefile: String
If not None, name of file to save the plot.
fmt: String
Format of the plotted markers.
"""
# Bin down array:
binsize = int((np.size(data)-1)/nbins + 1)
bindata, binuncert, binindp = ba.binarray(data, uncert, indparams, binsize)
binmodel = ba.weightedbin(model, binsize)
fs = 12 # Font-size
plt.figure(fignum, figsize=(8,6))
plt.clf()
# Residuals:
rax = plt.axes([0.15, 0.1, 0.8, 0.2])
rax.errorbar(binindp, bindata-binmodel, binuncert, fmt='ko', ms=4)
rax.plot([indparams[0], indparams[-1]], [0,0],'k:',lw=1.5)
rax.tick_params(labelsize=fs-1)
rax.set_xlabel("x", fontsize=fs)
rax.set_ylabel('Residuals', fontsize=fs)
# Data and Model:
ax = plt.axes([0.15, 0.35, 0.8, 0.55])
ax.errorbar(binindp, bindata, binuncert, fmt='ko', ms=4,
label='Binned Data')
ax.plot(indparams, model, "b", lw=2, label='Best Fit')
ax.get_xaxis().set_visible(False)
ax.tick_params(labelsize=fs-1)
ax.set_ylabel('y', fontsize=fs)
ax.legend(loc='best')
if savefile is not None:
plt.savefig(savefile)
def subplotter(rect, margin, ipan, nx, ny=None, ymargin=None):
"""
Create an axis instance for one panel (with index ipan) of a grid
of npanels, where the grid is located inside rect (xleft, ybottom,
xright, ytop).
Parameters
----------
rect: 1D List/ndarray
Rectangle with xlo, ylo, xhi, yhi positions of the grid boundaries.
margin: Float
Width of margin between panels.
ipan: Integer
Index of panel to create (as in plt.subplots).
nx: Integer
Number of panels along the x axis.
ny: Integer
Number of panels along the y axis. If None, assume ny=nx.
ymargin: Float
Width of margin between panels along y axes (if None, adopt margin).
Returns
-------
axes: axes instance
A matplotlib axes instance at the specified position.
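Example (illustrative)::

>>> # A 2x3 grid of panels inside the rectangle (0.1, 0.1, 0.95, 0.95),
>>> # with a 0.02 margin between panels:
>>> axes = [subplotter((0.1, 0.1, 0.95, 0.95), 0.02, i+1, nx=3, ny=2)
...         for i in range(6)]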
"""
if ny is None:
ny = nx
if ymargin is None:
ymargin = margin
# Size of a panel:
Dx = rect[2] - rect[0]
Dy = rect[3] - rect[1]
dx = Dx/nx - (nx-1.0)* margin/nx
dy = Dy/ny - (ny-1.0)*ymargin/ny
# Position of panel ipan:
# Follow plt's scheme, where panel 1 is at the top left panel,
# panel 2 is to the right of panel 1, and so on:
xloc = (ipan-1) % nx
yloc = (ny-1) - ((ipan-1) // nx)
# Bottom-left corner of panel:
xpanel = rect[0] + xloc*(dx+ margin)
ypanel = rect[1] + yloc*(dy+ymargin)
return plt.axes([xpanel, ypanel, dx, dy])
| mit |
tallakahath/pymatgen | pymatgen/electronic_structure/plotter.py | 1 | 139849 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import logging
import math
import itertools
import warnings
from collections import OrderedDict
import numpy as np
from matplotlib import patches
from monty.json import jsanitize
from pymatgen import Element
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.util.plotting import pretty_plot, \
add_fig_kwargs, get_ax3d_fig_plt
from pymatgen.core.units import Energy
from pymatgen.electronic_structure.boltztrap import BoltztrapError
from pymatgen.symmetry.bandstructure import HighSymmKpath
"""
This module implements plotter for DOS and band structure.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "May 1, 2012"
logger = logging.getLogger(__name__)
class DosPlotter(object):
"""
Class for plotting DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = DosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompleteDos.get_spd/element/others_dos().
plotter.add_dos_dict({"dos1": dos1, "dos2": dos2})
plotter.add_dos_dict(complete_dos.get_spd_dos())
Args:
zero_at_efermi: Whether to shift all Dos to have zero energy at the
fermi energy. Defaults to True.
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
def __init__(self, zero_at_efermi=True, stack=False, sigma=None):
self.zero_at_efermi = zero_at_efermi
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
Dos object
"""
energies = dos.energies - dos.efermi if self.zero_at_efermi \
else dos.energies
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
efermi = dos.efermi
self._doses[label] = {'energies': energies, 'densities': densities,
'efermi': efermi}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'energies':..,
'densities': {'up':...}, 'efermi':efermi}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
alldensities = []
allenergies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of energies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
energies = dos['energies']
densities = dos['densities']
if not y:
y = {Spin.up: np.zeros(energies.shape),
Spin.down: np.zeros(energies.shape)}
newdens = {}
for spin in [Spin.up, Spin.down]:
if spin in densities:
if self.stack:
y[spin] += densities[spin]
newdens[spin] = y[spin].copy()
else:
newdens[spin] = densities[spin]
allenergies.append(energies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allenergies.reverse()
allpts = []
for i, key in enumerate(keys):
x = []
y = []
for spin in [Spin.up, Spin.down]:
if spin in alldensities[i]:
densities = list(int(spin) * alldensities[i][spin])
energies = list(allenergies[i])
if spin == Spin.down:
energies.reverse()
densities.reverse()
x.extend(energies)
y.extend(densities)
allpts.extend(list(zip(x, y)))
if self.stack:
plt.fill(x, y, color=colors[i % ncolors],
label=str(key))
else:
plt.plot(x, y, color=colors[i % ncolors],
label=str(key), linewidth=3)
if not self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([self._doses[key]['efermi'],
self._doses[key]['efermi']], ylim,
color=colors[i % ncolors],
linestyle='--', linewidth=2)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
if self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Energies (eV)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instances in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
class BSPlotter(object):
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
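Example (a minimal sketch; assumes a line-mode VASP band-structure
calculation with its vasprun.xml and KPOINTS files available)::

from pymatgen.io.vasp.outputs import Vasprun
bs = Vasprun("vasprun.xml").get_band_structure(line_mode=True)
BSPlotter(bs).show()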
"""
def __init__(self, bs):
if not isinstance(bs, BandStructureSymmLine):
raise ValueError(
"BSPlotter only works with BandStructureSymmLine objects. "
"A BandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
# TODO: come up with an intelligent way to cut the highest unconverged
# bands
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize: only plot the unique values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self, zero_to_efermi=True):
"""
Get the data nicely formatted for a plot
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from the
eigenvalues and plot.
Returns:
A dict of the following format:
ticks: A dict with the 'distances' at which there is a kpoint (the
x axis) and the labels (None if no label)
energy: A dict storing bands for spin up and spin down data
[{Spin:[band_index][k_point_index]}] as a list (one element
for each branch) of energy for each kpoint. The data is
stored by branch to facilitate the plotting
vbm: A list of tuples (distance, energy) marking the vbms. The
energies are shifted with respect to the Fermi level if the
option has been selected.
cbm: A list of tuples (distance, energy) marking the cbms. The
energies are shifted with respect to the Fermi level if the
option has been selected.
lattice: The reciprocal lattice.
zero_energy: This is the energy used as zero for the plot.
band_gap: A string indicating the band gap and its nature (empty if
it's a metal).
is_metal: True if the band structure is metallic (i.e., there is at
least one band crossing the fermi level).
"""
distance = []
energy = []
if self._bs.is_metal():
zero_energy = self._bs.efermi
else:
zero_energy = self._bs.get_vbm()['energy']
if not zero_to_efermi:
zero_energy = 0.0
for b in self._bs.branches:
if self._bs.is_spin_polarized:
energy.append({str(Spin.up): [], str(Spin.down): []})
else:
energy.append({str(Spin.up): []})
distance.append([self._bs.distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
ticks = self.get_ticks()
for i in range(self._nb_bands):
energy[-1][str(Spin.up)].append(
[self._bs.bands[Spin.up][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
energy[-1][str(Spin.down)].append(
[self._bs.bands[Spin.down][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
vbm = self._bs.get_vbm()
cbm = self._bs.get_cbm()
vbm_plot = []
cbm_plot = []
for index in cbm['kpoint_index']:
cbm_plot.append((self._bs.distance[index],
cbm['energy'] - zero_energy if zero_to_efermi
else cbm['energy']))
for index in vbm['kpoint_index']:
vbm_plot.append((self._bs.distance[index],
vbm['energy'] - zero_energy if zero_to_efermi
else vbm['energy']))
bg = self._bs.get_band_gap()
direct = "Indirect"
if bg['direct']:
direct = "Direct"
return {'ticks': ticks, 'distances': distance, 'energy': energy,
'vbm': vbm_plot, 'cbm': cbm_plot,
'lattice': self._bs.lattice_rec.as_dict(),
'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(),
'band_gap': "{} {} bandgap = {}".format(direct,
bg['transition'],
bg['energy'])
if not self._bs.is_metal() else ""}
def get_plot(self, zero_to_efermi=True, ylim=None, smooth=False,
vbm_cbm_marker=False,smooth_tol=None):
"""
Get a matplotlib object for the bandstructure plot.
Blue lines are up spin, red lines are down
spin.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
ylim: Specify the y-axis (energy) limits; by default None lets
the code choose: vbm-4 and cbm+4 if insulator,
efermi-10 and efermi+10 if metal.
smooth: interpolates the bands with a cubic spline
smooth_tol (float) : tolerance for fitting spline to band data.
Default is None such that no tolerance will be used.
"""
plt = pretty_plot(12, 8)
from matplotlib import rc
import scipy.interpolate as scint
try:
rc('text', usetex=True)
except:
# Fall back on non Tex if errored.
rc('text', usetex=False)
# main internal config options
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
#band_linewidth = 3
band_linewidth = 1
data = self.bs_plot_data(zero_to_efermi)
if not smooth:
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))],
'r--', linewidth=band_linewidth)
else:
# Interpolation failure can be caused by trying to fit an entire
# band with one spline rather than fitting with piecewise splines
# (splines are ill-suited to fit discontinuities).
#
# The number of splines used to fit a band is determined by the
# number of branches (high symmetry lines) defined in the
# BandStructureSymmLine object (see BandStructureSymmLine._branches).
warning = "WARNING! Distance / branch {d}, band {i} cannot be "+\
"interpolated.\n"+\
"See full warning in source.\n"+\
"If this is not a mistake, try increasing "+\
"smooth_tol.\nCurrent smooth_tol is {s}."
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))],
s = smooth_tol)
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
xs = [x * step + data['distances'][d][0]
for x in range(1000)]
ys = [scint.splev(x * step + data['distances'][d][0],
tck, der=0)
for x in range(1000)]
for y in ys:
if np.isnan(y):
print(warning.format(d=str(d),i=str(i),
s=str(smooth_tol)))
break
plt.plot(xs, ys, 'b-', linewidth=band_linewidth)
if self._bs.is_spin_polarized:
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))],
s = smooth_tol)
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
xs = [x * step + data['distances'][d][0]
for x in range(1000)]
ys = [scint.splev(
x * step + data['distances'][d][0],
tck, der=0)
for x in range(1000)]
for y in ys:
if np.isnan(y):
print(warning.format(d=str(d),i=str(i),
s=str(smooth_tol)))
break
plt.plot(xs, ys, 'r--', linewidth=band_linewidth)
# plt.plot([x * step + data['distances'][d][0]
# for x in range(1000)],
# [scint.splev(
# x * step + data['distances'][d][0],
# tck, der=0)
# for x in range(1000)], 'r--',
# linewidth=band_linewidth)
self._maketicks(plt)
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{E\ -\ E_f\ (eV)}$' if zero_to_efermi \
else r'$\mathrm{Energy\ (eV)}$'
plt.ylabel(ylabel, fontsize=30)
# Draw Fermi energy, only if not the zero
if not zero_to_efermi:
ef = self._bs.efermi
plt.axhline(ef, linewidth=2, color='k')
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is None:
if self._bs.is_metal():
# Plot A Metal
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs.efermi + e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min,
data['cbm'][0][1] + e_max)
else:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, zero_to_efermi=True, ylim=None, smooth=False,
smooth_tol=None):
"""
Show the plot using matplotlib.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
ylim: Specify the y-axis (energy) limits; by default None lets
the code choose: vbm-4 and cbm+4 if insulator,
efermi-10 and efermi+10 if metal.
smooth: interpolates the bands with a cubic spline
smooth_tol (float) : tolerance for fitting spline to band data.
Default is None such that no tolerance will be used.
"""
plt = self.get_plot(zero_to_efermi, ylim, smooth)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None,
zero_to_efermi=True, smooth=False):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
smooth=smooth)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distances at which ticks should
be set and 'label': a list of labels for each of those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs.kpoints[0].label
previous_branch = self._bs.branches[0]['name']
for i, c in enumerate(self._bs.kpoints):
if c.label is not None:
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter, legend=True):
"""
Plot two band structures for comparison. The band structure used to
build this BSPlotter is drawn in blue (spin up) and red (spin down);
the one from *other_plotter* is drawn in cyan and magenta. The two
band structures need to be defined on the same symmetry lines, and
the distances between symmetry lines are those of the band structure
used to build the BSPlotter.
Args:
other_plotter: Another BSPlotter object defined along the same
symmetry lines.
legend: If True, add a legend identifying the two band structures.
Returns:
a matplotlib object with both band structures
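Example (an illustrative sketch; bs1 and bs2 are BandStructureSymmLine
objects computed along the same k-path)::

plt = BSPlotter(bs1).plot_compare(BSPlotter(bs2))
plt.show()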
"""
# TODO: add exception if the band structures are not compatible
import matplotlib.lines as mlines
plt = self.get_plot()
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[str(Spin.up)][i] for e in data['energy']][d],
'c-', linewidth=band_linewidth)
if other_plotter._bs.is_spin_polarized:
plt.plot(data_orig['distances'][d],
[e[str(Spin.down)][i] for e in data['energy']][d],
'm--', linewidth=band_linewidth)
if legend:
handles = [mlines.Line2D([], [], linewidth=2,
color='b', label='bs 1 up'),
mlines.Line2D([], [], linewidth=2,
color='r', label='bs 1 down', linestyle="--"),
mlines.Line2D([], [], linewidth=2,
color='c', label='bs 2 up'),
mlines.Line2D([], [], linewidth=2,
color='m', linestyle="--",
label='bs 2 down')]
plt.legend(handles=handles)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
# get labels and lines
labels = {}
for k in self._bs.kpoints:
if k.label:
labels[k.label] = k.frac_coords
lines = []
for b in self._bs.branches:
lines.append([self._bs.kpoints[b['start_index']].frac_coords, self._bs.kpoints[b['end_index']].frac_coords])
plot_brillouin_zone(self._bs.lattice_rec, lines=lines, labels=labels)
class BSPlotterProjected(BSPlotter):
"""
Class to plot or get data to facilitate the plot of band structure objects
projected along orbitals, elements or sites.
Args:
bs: A BandStructureSymmLine object with projections.
"""
def __init__(self, bs):
if len(bs.projections) == 0:
raise ValueError("try to plot projections"
" on a band structure without any")
super(BSPlotterProjected, self).__init__(bs)
def _get_projections_by_branches(self, dictio):
proj = self._bs.get_projections_on_elements_and_orbitals(dictio)
proj_br = []
for b in self._bs.branches:
if self._bs.is_spin_polarized:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)]})
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
proj_br[-1][str(Spin.up)][i].append(
{e: {o: proj[Spin.up][i][j][e][o]
for o in proj[Spin.up][i][j][e]}
for e in proj[Spin.up][i][j]})
if self._bs.is_spin_polarized:
for b in self._bs.branches:
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
proj_br[-1][str(Spin.down)][i].append(
{e: {o: proj[Spin.down][i][j][e][o]
for o in proj[Spin.down][i][j][e]}
for e in proj[Spin.down][i][j]})
return proj_br
def get_projected_plots_dots(self, dictio, zero_to_efermi=True, ylim=None,
vbm_cbm_marker=False):
"""
Method returning a plot composed of subplots along different elements
and orbitals.
Args:
dictio: The element and orbitals you want a projection on. The
format is {Element:[Orbitals]} for instance
{'Cu':['d','s'],'O':['p']} will give projections for Cu on
d and s orbitals and on oxygen p.
Returns:
a pylab object with different subfigures for each projection.
The blue and red colors are for spin up and spin down.
The bigger the red or blue dot in the band structure, the higher
the character for the corresponding element and orbital.
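Example (an illustrative sketch; bs must be a BandStructureSymmLine
carrying orbital projections)::

plotter = BSPlotterProjected(bs)
plotter.get_projected_plots_dots({'Cu': ['d', 's'], 'O': ['p']}).show()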
"""
band_linewidth = 1.0
fig_number = sum([len(v) for v in dictio.values()])
proj = self._get_projections_by_branches(dictio)
data = self.bs_plot_data(zero_to_efermi)
plt = pretty_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in dictio:
for o in dictio[el]:
plt.subplot(100 * math.ceil(fig_number / 2) + 20 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))],
'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in
range(len(data['distances'][b]))],
'r--', linewidth=band_linewidth)
for j in range(
len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.down)][i][
j], 'ro',
markersize=
proj[b][str(Spin.down)][i][j][str(el)][
o] * 15.0)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j],
'bo',
markersize=
proj[b][str(Spin.up)][i][j][str(el)][
o] * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs.efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r',
marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g',
marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el) + " " + str(o))
count += 1
return plt
def get_elt_projected_plots(self, zero_to_efermi=True, ylim=None,
vbm_cbm_marker=False):
"""
Method returning a plot composed of subplots along different elements
        Returns:
            a pylab object with one subfigure per element.
            Solid and dashed lines are the spin up and spin down bands,
            respectively. The bigger (and more intensely colored) the dot in
            the band structure, the higher the character for the
            corresponding element at that point.
"""
band_linewidth = 1.0
proj = self._get_projections_by_branches({e.symbol: ['s', 'p', 'd']
for e in
self._bs.structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
plt = pretty_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in self._bs.structure.composition.elements:
plt.subplot(220 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))], '-', color=[192/255,192/255,192/255],
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in range(len(data['distances'][b]))],
'--', color=[128/255,128/255,128/255], linewidth=band_linewidth)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
markerscale = sum([proj[b][str(Spin.down)][i][
j][str(el)][o] for o in
proj[b]
[str(Spin.down)][i][j][
str(el)]])
                            plt.plot(data['distances'][b][j],
                                     data['energy'][b][str(Spin.down)][i][j], 'o',
                                     markersize=markerscale * 15.0,
                                     color=[markerscale, 0.3 * markerscale, 0.4 * markerscale])
for j in range(len(data['energy'][b][str(Spin.up)][i])):
markerscale = sum(
[proj[b][str(Spin.up)][i][j][str(el)][o]
for o in proj[b]
[str(Spin.up)][i][j][str(el)]])
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j], 'o',
markersize=markerscale*15.0, color=[markerscale, 0.3*markerscale, 0.4*markerscale])
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs.efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el))
count += 1
return plt
def get_elt_projected_plots_color(self, zero_to_efermi=True,
elt_ordered=None):
"""
returns a pylab plot object with one plot where the band structure
line color depends on the character of the band (along different
elements). Each element is associated with red, green or blue
and the corresponding rgb color depending on the character of the band
is used. The method can only deal with binary and ternary compounds
spin up and spin down are differientiated by a '-' and a '--' line
Args:
elt_ordered: A list of Element ordered. The first one is red,
second green, last blue
Returns:
a pylab object
"""
band_linewidth = 3.0
        if len(self._bs.structure.composition.elements) > 3:
            raise ValueError("The method can only handle compounds with at "
                             "most 3 elements.")
if elt_ordered is None:
elt_ordered = self._bs.structure.composition.elements
proj = self._get_projections_by_branches(
{e.symbol: ['s', 'p', 'd']
for e in self._bs.structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
plt = pretty_plot(12, 8)
spins = [Spin.up]
if self._bs.is_spin_polarized:
spins = [Spin.up, Spin.down]
self._maketicks(plt)
for s in spins:
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
for j in range(len(data['energy'][b][str(s)][i]) - 1):
sum_e = 0.0
for el in elt_ordered:
sum_e = sum_e + \
sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
if sum_e == 0.0:
color = [0.0] * len(elt_ordered)
else:
color = [sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
/ sum_e
for el in elt_ordered]
if len(color) == 2:
color.append(0.0)
color[2] = color[1]
color[1] = 0.0
sign = '-'
if s == Spin.down:
sign = '--'
plt.plot([data['distances'][b][j],
data['distances'][b][j + 1]],
[data['energy'][b][str(s)][i][j],
data['energy'][b][str(s)][i][j + 1]], sign,
color=color, linewidth=band_linewidth)
        if self._bs.is_metal():
            e_min = -10
            e_max = 10
            if zero_to_efermi:
                plt.ylim(e_min, e_max)
            else:
                plt.ylim(self._bs.efermi + e_min, self._bs.efermi + e_max)
else:
plt.ylim(data['vbm'][0][1] - 4.0, data['cbm'][0][1] + 2.0)
return plt
def _get_projections_by_branches_patom_pmorb(self, dictio, dictpa, sum_atoms, sum_morbs, selected_branches):
import copy
setos = {'s': 0, 'py': 1, 'pz': 2, 'px': 3, 'dxy': 4, 'dyz': 5, 'dz2': 6, 'dxz': 7,
'dx2': 8, 'f_3': 9, 'f_2': 10, 'f_1': 11, 'f0': 12, 'f1': 13, 'f2': 14, 'f3': 15}
num_branches = len(self._bs.branches)
if selected_branches is not None:
indices = []
            if not isinstance(selected_branches, list):
                raise TypeError("'selected_branches' must be of 'list' type.")
            elif len(selected_branches) == 0:
                raise ValueError("'selected_branches' is empty. We cannot do anything.")
            else:
                for index in selected_branches:
                    if not isinstance(index, int):
                        raise ValueError("The indices of the symmetry lines must be of "
                                         "'int' type.")
                    elif index > num_branches or index < 1:
                        raise ValueError("You gave an incorrect index of symmetry lines: %s. The index should be in "
                                         "the range [1, %s]." % (str(index), str(num_branches)))
else:
indices.append(index-1)
else:
indices = range(0, num_branches)
proj = self._bs.projections
proj_br = []
for index in indices:
b = self._bs.branches[index]
print(b)
if self._bs.is_spin_polarized:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)]})
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
edict = {}
for elt in dictpa:
for anum in dictpa[elt]:
edict[elt + str(anum)] = {}
for morb in dictio[elt]:
edict[elt + str(anum)][morb] = proj[Spin.up][i][j][setos[morb]][anum-1]
proj_br[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
edict = {}
for elt in dictpa:
for anum in dictpa[elt]:
edict[elt + str(anum)] = {}
for morb in dictio[elt]:
                                    edict[elt + str(anum)][morb] = proj[Spin.down][i][j][setos[morb]][anum-1]
proj_br[-1][str(Spin.down)][i].append(edict)
# Adjusting projections for plot
dictio_d, dictpa_d = self._summarize_keys_for_plot(dictio, dictpa, sum_atoms, sum_morbs)
print('dictio_d: %s' % str(dictio_d))
print('dictpa_d: %s' % str(dictpa_d))
if (sum_atoms is None) and (sum_morbs is None):
proj_br_d = copy.deepcopy(proj_br)
else:
proj_br_d = []
branch = -1
for index in indices:
branch += 1
br = self._bs.branches[index]
if self._bs.is_spin_polarized:
proj_br_d.append(
{str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br_d.append(
{str(Spin.up): [[] for l in range(self._nb_bands)]})
if (sum_atoms is not None) and (sum_morbs is None):
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(proj_br[branch][str(Spin.up)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_atoms:
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][morb] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(proj_br[branch][str(Spin.down)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_atoms:
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][morb] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.down)][i].append(edict)
elif (sum_atoms is None) and (sum_morbs is not None):
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(proj_br[branch][str(Spin.up)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_morbs:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + anum][morb]
edict[elt + anum][dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(proj_br[branch][str(Spin.down)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_morbs:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + anum][morb]
edict[elt + anum][dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.down)][i].append(edict)
else:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(proj_br[branch][str(Spin.up)][i][j])
edict = {}
for elt in dictpa:
if (elt in sum_atoms) and (elt in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + anum][morb]
edict[elt + anum][dictio_d[elt][-1]] = sprojection
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio_d[elt][:-1]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][morb] = sprojection
sprojection = 0.0
for anum in sum_atoms[elt]:
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][dictio_d[elt][-1]] = sprojection
elif (elt in sum_atoms) and (elt not in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][morb] = sprojection
elif (elt not in sum_atoms) and (elt in sum_morbs):
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + anum][morb]
edict[elt + anum][dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
proj_br_d[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(proj_br[branch][str(Spin.down)][i][j])
edict = {}
for elt in dictpa:
if (elt in sum_atoms) and (elt in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + anum][morb]
edict[elt + anum][dictio_d[elt][-1]] = sprojection
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio_d[elt][:-1]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][morb] = sprojection
sprojection = 0.0
for anum in sum_atoms[elt]:
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][dictio_d[elt][-1]] = sprojection
elif (elt in sum_atoms) and (elt not in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][morb] = sprojection
elif (elt not in sum_atoms) and (elt in sum_morbs):
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += atoms_morbs[elt + anum][morb]
edict[elt + anum][dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt]:
edict[elt + anum][morb] = atoms_morbs[elt + anum][morb]
proj_br_d[-1][str(Spin.down)][i].append(edict)
return proj_br_d, dictio_d, dictpa_d, indices
def get_projected_plots_dots_patom_pmorb(self, dictio, dictpa, sum_atoms=None, sum_morbs=None,
zero_to_efermi=True, ylim=None, vbm_cbm_marker=False,
selected_branches=None, w_h_size=(12,8), num_column = None):
"""
        Method returning a plot composed of subplots for different atoms and orbitals (subshell orbitals such as
        's', 'p', 'd' and 'f' defined by azimuthal quantum numbers l = 0, 1, 2 and 3, respectively, or
        individual orbitals like 'px', 'py' and 'pz' defined by magnetic quantum numbers m = -1, 1 and 0, respectively).
        This is an extension of the "get_projected_plots_dots" method.
Args:
dictio: The elements and the orbitals you need to project on. The format is {Element:[Orbitals]},
for instance:
{'Cu':['dxy','s','px'],'O':['px','py','pz']} will give projections for Cu on orbitals dxy, s, px and
                for O on orbitals px, py, pz. If you want to sum over all individual orbitals of a subshell,
                for example 'px', 'py' and 'pz' of O, simply set
                {'Cu':['dxy','s','px'],'O':['p']} and set sum_morbs (see explanations below) as {'O':['p'],...}.
                Otherwise, you will get an error.
dictpa: The elements and their sites (defined by site numbers) you need to project on. The format is
{Element: [Site numbers]}, for instance: {'Cu':[1,5],'O':[3,4]} will give projections for Cu on site-1
and on site-5, O on site-3 and on site-4 in the cell.
            Attention:
                The site numbers must be consistent with the structure that was actually computed. Normally,
                this structure is identical to the POSCAR file; however, VASP can sometimes rotate or
                translate the cell. It is therefore safer to use the Vasprun class to get the final_structure
                and, from it, the correct site indices of the atoms.
sum_atoms: Sum projection of the similar atoms together (e.g.: Cu on site-1 and Cu on site-5). The format is
{Element: [Site numbers]}, for instance:
{'Cu': [1,5], 'O': [3,4]} means summing projections over Cu on site-1 and Cu on site-5 and O on site-3
and on site-4. If you do not want to use this functional, just turn it off by setting sum_atoms = None.
sum_morbs: Sum projections of individual orbitals of similar atoms together (e.g.: 'dxy' and 'dxz'). The
format is {Element: [individual orbitals]}, for instance:
{'Cu': ['dxy', 'dxz'], 'O': ['px', 'py']} means summing projections over 'dxy' and 'dxz' of Cu and 'px'
and 'py' of O. If you do not want to use this functional, just turn it off by setting sum_morbs = None.
            selected_branches: The indices of the symmetry lines you chose for plotting. This can be useful when there
                are many symmetry lines (in the KPOINTS file) while you only want to show certain ones. The format is
                [index of line], for instance:
                [1, 3, 4] means you just need to do projections along lines number 1, 3 and 4 while neglecting lines
                number 2 and so on. By default, this is None and all symmetry lines will be plotted.
            w_h_size: This variable helps you control the width and height of the figure. By default, width = 12 and
                height = 8 (inches). The width/height ratio is kept the same for subfigures and the size of each
                depends on how many subfigures are plotted.
            num_column: This variable helps you manage how the subfigures are arranged in the figure by setting
                the number of columns of subfigures. The value should be an int. For example, num_column = 3
                means you want to plot subfigures in 3 columns. By default, num_column = None and subfigures are
                aligned in 2 columns.
        Returns:
            A pylab object with one subfigure per projection. The blue and red lines are the bands
            for spin up and spin down. The green and cyan dots are the projections for spin up and spin down. The
            bigger the green or cyan dots in the projected band structures, the higher the character of the
            corresponding elements and orbitals. List of individual orbitals and their indices (as set up by VASP,
            with no special meaning):
            s = 0; py = 1, pz = 2, px = 3; dxy = 4, dyz = 5, dz2 = 6, dxz = 7, dx2 = 8;
            f_3 = 9, f_2 = 10, f_1 = 11, f0 = 12, f1 = 13, f2 = 14, f3 = 15
"""
dictio, sum_morbs = self._Orbitals_SumOrbitals(dictio, sum_morbs)
dictpa, sum_atoms, number_figs = self._number_of_subfigures(dictio, dictpa, sum_atoms, sum_morbs)
print('Number of subfigures: %s' % str(number_figs))
if number_figs > 9:
print("The number of sub-figures %s might be too manny and the implementation might take a long time.\n"
"A smaller number or a plot with selected symmetry lines (selected_branches) might be better.\n"
% str(number_figs))
import math
from pymatgen.util.plotting import pretty_plot
band_linewidth = 0.5
plt = pretty_plot(w_h_size[0], w_h_size[1])
proj_br_d, dictio_d, dictpa_d, branches = self._get_projections_by_branches_patom_pmorb(dictio, dictpa,
sum_atoms, sum_morbs, selected_branches)
data = self.bs_plot_data(zero_to_efermi)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 0
for elt in dictpa_d:
for numa in dictpa_d[elt]:
for o in dictio_d[elt]:
count += 1
if num_column is None:
                        if number_figs == 1:
                            plt.subplot(1, 1, 1)
                        else:
                            row = number_figs // 2
                            if number_figs % 2 == 0:
                                plt.subplot(row, 2, count)
                            else:
                                plt.subplot(row + 1, 2, count)
                    elif isinstance(num_column, int):
                        row = number_figs // num_column
                        if number_figs % num_column == 0:
                            plt.subplot(row, num_column, count)
                        else:
                            plt.subplot(row + 1, num_column, count)
                    else:
                        raise ValueError("An invalid 'num_column' was given. It should be an integer.")
plt, shift = self._maketicks_selected(plt, branches)
br = -1
for b in branches:
br += 1
for i in range(self._nb_bands):
                            plt.plot([x - shift[br] for x in data['distances'][b]],
                                     [data['energy'][b][str(Spin.up)][i][j]
                                      for j in range(len(data['distances'][b]))],
                                     'b-', linewidth=band_linewidth)
                            if self._bs.is_spin_polarized:
                                plt.plot([x - shift[br] for x in data['distances'][b]],
                                         [data['energy'][b][str(Spin.down)][i][j]
                                          for j in range(len(data['distances'][b]))],
                                         'r--', linewidth=band_linewidth)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j] - shift[br],
data['energy'][b][str(Spin.down)][i][j], 'co', markersize=\
proj_br_d[br][str(Spin.down)][i][j][elt + numa][o] * 15.0)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j] - shift[br],
data['energy'][b][str(Spin.up)][i][j], 'go', markersize=\
proj_br_d[br][str(Spin.up)][i][j][elt + numa][o] * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
                                plt.ylim(self._bs.efermi + e_min, self._bs.efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r',
marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g',
marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(elt + "_" + numa + "_" + str(o))
return plt
def _Orbitals_SumOrbitals(self, dictio, sum_morbs):
from pymatgen.core.periodic_table import Element
from collections import Counter
import copy
all_orbitals = ['s', 'p', 'd', 'f', 'px', 'py', 'pz', 'dxy', 'dyz', 'dxz', 'dx2', 'dz2',
'f_3', 'f_2', 'f_1', 'f0', 'f1', 'f2', 'f3']
individual_orbs = {'p': ['px', 'py', 'pz'], 'd': ['dxy', 'dyz', 'dxz', 'dx2', 'dz2'],
'f': ['f_3', 'f_2', 'f_1', 'f0', 'f1', 'f2', 'f3']}
        if not isinstance(dictio, dict):
            raise TypeError("An invalid type of 'dictio' was found. It should be dict type.")
elif len(dictio.keys()) == 0:
raise KeyError("The 'dictio' is empty. We cannot do anything.")
else:
for elt in dictio:
if Element.is_valid_symbol(elt):
if isinstance(dictio[elt], list):
if len(dictio[elt]) == 0:
raise ValueError("The dictio[%s] is empty. We cannot do anything" % elt)
for orb in dictio[elt]:
if not isinstance(orb, str):
raise ValueError("The invalid format of orbitals is in 'dictio[%s]': %s. "
"They should be string." % (elt,str(orb)))
elif orb not in all_orbitals:
raise ValueError("The invalid name of orbital is given in 'dictio[%s]'." % elt)
else:
if orb in individual_orbs.keys():
if len(set(dictio[elt]).intersection(individual_orbs[orb])) != 0:
raise ValueError("The 'dictio[%s]' contains orbitals repeated." % elt)
else:
pass
else:
pass
nelems = Counter(dictio[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError("You put in at least two similar orbitals in dictio[%s]." % elt)
else:
raise TypeError("The invalid type of value was put into 'dictio[%s]'. It should be list "
"type." % elt)
else:
raise KeyError("The invalid element was put into 'dictio' as a key: %s" % elt)
if sum_morbs is None:
print("You do not want to sum projection over orbitals.")
        elif not isinstance(sum_morbs, dict):
            raise TypeError("An invalid type of 'sum_morbs' was found. It should be dict or 'None' type.")
elif len(sum_morbs.keys()) == 0:
raise KeyError("The 'sum_morbs' is empty. We cannot do anything")
else:
for elt in sum_morbs:
if Element.is_valid_symbol(elt):
if isinstance(sum_morbs[elt], list):
for orb in sum_morbs[elt]:
if not isinstance(orb, str):
raise TypeError("The invalid format of orbitals is in 'sum_morbs[%s]': %s. "
"They should be string." % (elt,str(orb)))
elif orb not in all_orbitals:
raise ValueError("The invalid name of orbital in 'sum_morbs[%s]' is given." % elt)
else:
if orb in individual_orbs.keys():
if len(set(sum_morbs[elt]).intersection(individual_orbs[orb])) != 0:
raise ValueError("The 'sum_morbs[%s]' contains orbitals repeated." % elt)
else:
pass
else:
pass
nelems = Counter(sum_morbs[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError("You put in at least two similar orbitals in sum_morbs[%s]." % elt)
else:
raise TypeError("The invalid type of value was put into 'sum_morbs[%s]'. It should be list "
"type." % elt)
if elt not in dictio.keys():
raise ValueError("You cannot sum projection over orbitals of atoms '%s' because they are not "
"mentioned in 'dictio'." % elt)
else:
raise KeyError("The invalid element was put into 'sum_morbs' as a key: %s" % elt)
for elt in dictio:
if len(dictio[elt]) == 1:
if len(dictio[elt][0]) > 1:
if elt in sum_morbs.keys():
raise ValueError("You cannot sum projection over one individual orbital '%s' of '%s'." %
(dictio[elt][0], elt))
else:
pass
else:
if sum_morbs is None:
pass
elif elt not in sum_morbs.keys():
print("You do not want to sum projection over orbitals of element: %s" % elt)
else:
if len(sum_morbs[elt]) == 0:
raise ValueError("The empty list is an invalid value for sum_morbs[%s]." % elt)
elif len(sum_morbs[elt]) > 1:
for orb in sum_morbs[elt]:
if dictio[elt][0] not in orb:
raise ValueError("The invalid orbital '%s' was put into 'sum_morbs[%s]'." %
(orb, elt))
else:
pass
                    else:
                        orb = sum_morbs[elt][0]
                        if orb == 's' or len(orb) > 1:
                            raise ValueError("An invalid orbital '%s' was put into sum_morbs['%s']." % (orb, elt))
                        else:
                            sum_morbs[elt] = individual_orbs[dictio[elt][0]]
else:
duplicate = copy.deepcopy(dictio[elt])
for orb in dictio[elt]:
if orb in individual_orbs.keys():
duplicate.remove(orb)
for o in individual_orbs[orb]:
duplicate.append(o)
else:
pass
dictio[elt] = copy.deepcopy(duplicate)
if sum_morbs is None:
pass
elif elt not in sum_morbs.keys():
print("You do not want to sum projection over orbitals of element: %s" % elt)
else:
if len(sum_morbs[elt]) == 0:
raise ValueError("The empty list is an invalid value for sum_morbs[%s]." % elt)
elif len(sum_morbs[elt]) == 1:
orb = sum_morbs[elt][0]
if orb == 's':
raise ValueError("We do not sum projection over only 's' orbital of the same "
"type of element.")
elif orb in individual_orbs.keys():
sum_morbs[elt].pop(0)
for o in individual_orbs[orb]:
sum_morbs[elt].append(o)
else:
raise ValueError("You never sum projection over one orbital in sum_morbs[%s]" % elt)
else:
duplicate = copy.deepcopy(sum_morbs[elt])
for orb in sum_morbs[elt]:
if orb in individual_orbs.keys():
duplicate.remove(orb)
for o in individual_orbs[orb]:
duplicate.append(o)
else:
pass
sum_morbs[elt] = copy.deepcopy(duplicate)
for orb in sum_morbs[elt]:
if orb not in dictio[elt]:
raise ValueError("The orbitals of sum_morbs[%s] conflict with those of dictio[%s]." %
(elt, elt))
return dictio, sum_morbs
def _number_of_subfigures(self, dictio, dictpa, sum_atoms, sum_morbs):
from pymatgen.core.periodic_table import Element
from collections import Counter
        if not isinstance(dictpa, dict):
            raise TypeError("An invalid type of 'dictpa' was found. It should be dict type.")
elif len(dictpa.keys()) == 0:
raise KeyError("The 'dictpa' is empty. We cannot do anything.")
else:
for elt in dictpa:
if Element.is_valid_symbol(elt):
if isinstance(dictpa[elt], list):
if len(dictpa[elt]) == 0:
raise ValueError("The dictpa[%s] is empty. We cannot do anything" % elt)
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
                            if list(_sites[i]._species.keys())[0] == Element(elt):
indices.append(i+1)
for number in dictpa[elt]:
if isinstance(number, str):
if 'all' == number.lower():
dictpa[elt] = indices
print("You want to consider all '%s' atoms." % elt)
break
else:
raise ValueError("You put wrong site numbers in 'dictpa[%s]': %s." %
(elt,str(number)))
elif isinstance(number, int):
if number not in indices:
raise ValueError("You put wrong site numbers in 'dictpa[%s]': %s." %
(elt,str(number)))
else:
raise ValueError("You put wrong site numbers in 'dictpa[%s]': %s." % (elt,str(number)))
nelems = Counter(dictpa[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError("You put at least two similar site numbers into 'dictpa[%s]'." % elt)
else:
raise TypeError("The invalid type of value was put into 'dictpa[%s]'. It should be list "
"type." % elt)
else:
raise KeyError("The invalid element was put into 'dictpa' as a key: %s" % elt)
        if len(dictio.keys()) != len(dictpa.keys()):
            raise KeyError("The number of keys in 'dictio' and 'dictpa' is not the same.")
        else:
            for elt in dictio.keys():
                if elt not in dictpa.keys():
                    raise KeyError("The element '%s' is not in both dictpa and dictio." % elt)
            for elt in dictpa.keys():
                if elt not in dictio.keys():
                    raise KeyError("The element '%s' is not in both dictpa and dictio." % elt)
if sum_atoms is None:
print("You do not want to sum projection over atoms.")
        elif not isinstance(sum_atoms, dict):
            raise TypeError("An invalid type of 'sum_atoms' was found. It should be dict type.")
elif len(sum_atoms.keys()) == 0:
raise KeyError("The 'sum_atoms' is empty. We cannot do anything.")
else:
for elt in sum_atoms:
if Element.is_valid_symbol(elt):
if isinstance(sum_atoms[elt], list):
if len(sum_atoms[elt]) == 0:
raise ValueError("The sum_atoms[%s] is empty. We cannot do anything" % elt)
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
                            if list(_sites[i]._species.keys())[0] == Element(elt):
indices.append(i+1)
for number in sum_atoms[elt]:
if isinstance(number, str):
if 'all' == number.lower():
sum_atoms[elt] = indices
print("You want to sum projection over all '%s' atoms." % elt)
break
else:
raise ValueError("You put wrong site numbers in 'sum_atoms[%s]'." % elt)
elif isinstance(number, int):
if number not in indices:
raise ValueError("You put wrong site numbers in 'sum_atoms[%s]'." % elt)
elif number not in dictpa[elt]:
raise ValueError("You cannot sum projection with atom number '%s' because it is not "
"metioned in dicpta[%s]" % (str(number), elt))
else:
raise ValueError("You put wrong site numbers in 'sum_atoms[%s]'." % elt)
nelems = Counter(sum_atoms[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError("You put at least two similar site numbers into 'sum_atoms[%s]'." % elt)
else:
raise TypeError("The invalid type of value was put into 'sum_atoms[%s]'. It should be list "
"type." % elt)
if elt not in dictpa.keys():
raise ValueError("You cannot sum projection over atoms '%s' because it is not "
"mentioned in 'dictio'." % elt)
else:
raise KeyError("The invalid element was put into 'sum_atoms' as a key: %s" % elt)
if len(sum_atoms[elt]) == 1:
raise ValueError("We do not sum projection over only one atom: %s" % elt)
max_number_figs = 0
decrease = 0
for elt in dictio:
max_number_figs += len(dictio[elt]) * len(dictpa[elt])
if (sum_atoms is None) and (sum_morbs is None):
number_figs = max_number_figs
elif (sum_atoms is not None) and (sum_morbs is None):
for elt in sum_atoms:
decrease += (len(sum_atoms[elt]) - 1) * len(dictio[elt])
number_figs = max_number_figs - decrease
elif (sum_atoms is None) and (sum_morbs is not None):
for elt in sum_morbs:
decrease += (len(sum_morbs[elt]) - 1) * len(dictpa[elt])
number_figs = max_number_figs - decrease
elif (sum_atoms is not None) and (sum_morbs is not None):
for elt in sum_atoms:
decrease += (len(sum_atoms[elt]) - 1) * len(dictio[elt])
for elt in sum_morbs:
if elt in sum_atoms:
decrease += (len(sum_morbs[elt]) - 1) * (len(dictpa[elt]) - len(sum_atoms[elt]) + 1)
else:
decrease += (len(sum_morbs[elt]) - 1) * len(dictpa[elt])
number_figs = max_number_figs - decrease
else:
raise ValueError("Invalid format of 'sum_atoms' and 'sum_morbs'.")
return dictpa, sum_atoms, number_figs
def _summarize_keys_for_plot(self, dictio, dictpa, sum_atoms, sum_morbs):
from pymatgen.core.periodic_table import Element
individual_orbs = {'p': ['px', 'py', 'pz'], 'd': ['dxy', 'dyz', 'dxz', 'dx2', 'dz2'],
'f': ['f_3', 'f_2', 'f_1', 'f0', 'f1', 'f2', 'f3']}
def number_label(list_numbers):
list_numbers = sorted(list_numbers)
divide = [[]]
divide[0].append(list_numbers[0])
group = 0
for i in range(1, len(list_numbers)):
if list_numbers[i] == list_numbers[i-1] + 1:
divide[group].append(list_numbers[i])
else:
group += 1
divide.append([list_numbers[i]])
label = ""
for elem in divide:
if len(elem) > 1:
label += str(elem[0]) + "-" + str(elem[-1]) + ","
else:
label += str(elem[0]) + ","
return label[:-1]
def orbital_label(list_orbitals):
divide = {}
for orb in list_orbitals:
if orb[0] in divide:
divide[orb[0]].append(orb)
else:
divide[orb[0]] = []
divide[orb[0]].append(orb)
label = ""
for elem in divide:
if elem == 's':
label += "s" + ","
else:
if len(divide[elem]) == len(individual_orbs[elem]):
label += elem + ","
else:
l = [o[1:] for o in divide[elem]]
label += elem + str(l).replace("['","").replace("']","").replace("', '","-") + ","
return label[:-1]
if (sum_atoms is None) and (sum_morbs is None):
dictio_d = dictio
dictpa_d = {elt: [str(anum) for anum in dictpa[elt]] for elt in dictpa}
elif (sum_atoms is not None) and (sum_morbs is None):
dictio_d = dictio
dictpa_d = {}
for elt in dictpa:
dictpa_d[elt] = []
if elt in sum_atoms:
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
                        if list(_sites[i]._species.keys())[0] == Element(elt):
indices.append(i+1)
flag_1 = len(set(dictpa[elt]).intersection(indices))
flag_2 = len(set(sum_atoms[elt]).intersection(indices))
if flag_1 == len(indices) and flag_2 == len(indices):
dictpa_d[elt].append('all')
else:
for anum in dictpa[elt]:
if anum not in sum_atoms[elt]:
dictpa_d[elt].append(str(anum))
label = number_label(sum_atoms[elt])
dictpa_d[elt].append(label)
else:
for anum in dictpa[elt]:
dictpa_d[elt].append(str(anum))
elif (sum_atoms is None) and (sum_morbs is not None):
dictio_d = {}
for elt in dictio:
dictio_d[elt] = []
if elt in sum_morbs:
for morb in dictio[elt]:
if morb not in sum_morbs[elt]:
dictio_d[elt].append(morb)
label = orbital_label(sum_morbs[elt])
dictio_d[elt].append(label)
else:
dictio_d[elt] = dictio[elt]
dictpa_d = {elt: [str(anum) for anum in dictpa[elt]] for elt in dictpa}
else:
dictio_d = {}
for elt in dictio:
dictio_d[elt] = []
if elt in sum_morbs:
for morb in dictio[elt]:
if morb not in sum_morbs[elt]:
dictio_d[elt].append(morb)
label = orbital_label(sum_morbs[elt])
dictio_d[elt].append(label)
else:
dictio_d[elt] = dictio[elt]
dictpa_d = {}
for elt in dictpa:
dictpa_d[elt] = []
if elt in sum_atoms:
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
                        if list(_sites[i]._species.keys())[0] == Element(elt):
indices.append(i + 1)
flag_1 = len(set(dictpa[elt]).intersection(indices))
flag_2 = len(set(sum_atoms[elt]).intersection(indices))
if flag_1 == len(indices) and flag_2 == len(indices):
dictpa_d[elt].append('all')
else:
for anum in dictpa[elt]:
if anum not in sum_atoms[elt]:
dictpa_d[elt].append(str(anum))
label = number_label(sum_atoms[elt])
dictpa_d[elt].append(label)
else:
for anum in dictpa[elt]:
dictpa_d[elt].append(str(anum))
return dictio_d, dictpa_d
def _maketicks_selected(self, plt, branches):
"""
utility private method to add ticks to a band structure with selected branches
"""
ticks = self.get_ticks()
distance = []
label = []
rm_elems = []
for i in range(1, len(ticks['distance'])):
if ticks['label'][i] == ticks['label'][i-1]:
rm_elems.append(i)
for i in range(len(ticks['distance'])):
if i not in rm_elems:
distance.append(ticks['distance'][i])
label.append(ticks['label'][i])
l_branches = [distance[i]-distance[i-1] for i in range(1,len(distance))]
n_distance = []
n_label = []
for branch in branches:
n_distance.append(l_branches[branch])
if ("$\\mid$" not in label[branch]) and ("$\\mid$" not in label[branch+1]):
n_label.append([label[branch], label[branch+1]])
elif ("$\\mid$" in label[branch]) and ("$\\mid$" not in label[branch+1]):
n_label.append([label[branch].split("$")[-1], label[branch+1]])
elif ("$\\mid$" not in label[branch]) and ("$\\mid$" in label[branch+1]):
n_label.append([label[branch], label[branch+1].split("$")[0]])
else:
n_label.append([label[branch].split("$")[-1], label[branch+1].split("$")[0]])
f_distance = []
rf_distance = []
f_label = []
f_label.append(n_label[0][0])
f_label.append(n_label[0][1])
f_distance.append(0.0)
f_distance.append(n_distance[0])
rf_distance.append(0.0)
rf_distance.append(n_distance[0])
length = n_distance[0]
for i in range(1, len(n_distance)):
if n_label[i][0] == n_label[i-1][1]:
f_distance.append(length)
f_distance.append(length + n_distance[i])
f_label.append(n_label[i][0])
f_label.append(n_label[i][1])
else:
f_distance.append(length + n_distance[i])
f_label[-1] = n_label[i-1][1] + "$\\mid$" + n_label[i][0]
f_label.append(n_label[i][1])
rf_distance.append(length + n_distance[i])
length += n_distance[i]
n_ticks = {'distance': f_distance, 'label': f_label}
uniq_d = []
uniq_l = []
temp_ticks = list(zip(n_ticks['distance'], n_ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
                    l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
                        l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(n_ticks['label'])):
if n_ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if n_ticks['label'][i] == n_ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=n_ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=n_ticks['distance'][i], l=n_ticks['label'][i]))
plt.axvline(n_ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=n_ticks['distance'][i], l=n_ticks['label'][i]))
plt.axvline(n_ticks['distance'][i], color='k')
shift = []
br = -1
for branch in branches:
br += 1
shift.append(distance[branch]-rf_distance[br])
return plt, shift
class BSDOSPlotter(object):
"""
A joint, aligned band structure and density of states plot. Contributions
from Jan Pohls as well as the online example from Germain Salvato-Vallverdu:
http://gvallver.perso.univ-pau.fr/?p=587
"""
def __init__(self, bs_projection="elements", dos_projection="elements",
vb_energy_range=4, cb_energy_range=4, fixed_cb_energy=False,
egrid_interval=1, font="Times New Roman", axis_fontsize=20,
tick_fontsize=15, legend_fontsize=14, bs_legend="best",
dos_legend="best", rgb_legend=True, fig_size=(11, 8.5)):
"""
Instantiate plotter settings.
Args:
bs_projection (str): "elements" or None
dos_projection (str): "elements", "orbitals", or None
            vb_energy_range (float): energy range (eV) of the valence bands to show
            cb_energy_range (float): energy range (eV) of the conduction bands to show
fixed_cb_energy (bool): If true, the cb_energy_range will be interpreted
as constant (i.e., no gap correction for cb energy)
egrid_interval (float): interval for grid marks
font (str): font family
axis_fontsize (float): font size for axis
tick_fontsize (float): font size for axis tick labels
legend_fontsize (float): font size for legends
bs_legend (str): matplotlib string location for legend or None
dos_legend (str): matplotlib string location for legend or None
rgb_legend (bool): (T/F) whether to draw RGB triangle/bar for element proj.
fig_size(tuple): dimensions of figure size (width, height)
"""
self.bs_projection = bs_projection
self.dos_projection = dos_projection
self.vb_energy_range = vb_energy_range
self.cb_energy_range = cb_energy_range
self.fixed_cb_energy = fixed_cb_energy
self.egrid_interval = egrid_interval
self.font = font
self.axis_fontsize = axis_fontsize
self.tick_fontsize = tick_fontsize
self.legend_fontsize = legend_fontsize
self.bs_legend = bs_legend
self.dos_legend = dos_legend
self.rgb_legend = rgb_legend
self.fig_size = fig_size
def get_plot(self, bs, dos):
"""
Get a matplotlib plot object.
Args:
bs (BandStructureSymmLine): the bandstructure to plot. Projection
data must exist for projected plots.
dos (Dos): the Dos to plot. Projection data must exist (i.e.,
CompleteDos) for projected plots.
Returns:
matplotlib.pyplot object on which you can call commands like show()
and savefig()
"""
import matplotlib.lines as mlines
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as mplt
# make sure the user-specified band structure projection is valid
elements = [e.symbol for e in dos.structure.composition.elements]
bs_projection = self.bs_projection
rgb_legend = self.rgb_legend and bs_projection and \
bs_projection.lower() == "elements" and \
len(elements) in [2, 3]
if bs_projection and bs_projection.lower() == "elements" and \
(len(elements) not in [2, 3] or not bs.get_projection_on_elements()):
warnings.warn(
"Cannot get element projected data; either the projection data "
"doesn't exist, or you don't have a compound with exactly 2 or 3"
" unique elements.")
bs_projection = None
# specify energy range of plot
emin = -self.vb_energy_range
emax = self.cb_energy_range if self.fixed_cb_energy else \
self.cb_energy_range + bs.get_band_gap()["energy"]
# initialize all the k-point labels and k-point x-distances for bs plot
xlabels = [] # all symmetry point labels on x-axis
xlabel_distances = [] # positions of symmetry point x-labels
x_distances = [] # x positions of kpoint data
prev_right_klabel = None # used to determine which branches require a midline separator
for idx, l in enumerate(bs.branches):
# get left and right kpoint labels of this branch
left_k, right_k = l["name"].split("-")
# add $ notation for LaTeX kpoint labels
if left_k[0] == "\\" or "_" in left_k:
left_k = "$"+left_k+"$"
if right_k[0] == "\\" or "_" in right_k:
right_k = "$"+right_k+"$"
# add left k label to list of labels
if prev_right_klabel is None:
xlabels.append(left_k)
xlabel_distances.append(0)
elif prev_right_klabel != left_k: # used for pipe separator
xlabels[-1] = xlabels[-1]+ "$\\mid$ " + left_k
# add right k label to list of labels
xlabels.append(right_k)
prev_right_klabel = right_k
# add x-coordinates for labels
left_kpoint = bs.kpoints[l["start_index"]].cart_coords
right_kpoint = bs.kpoints[l["end_index"]].cart_coords
distance = np.linalg.norm(right_kpoint - left_kpoint)
xlabel_distances.append(xlabel_distances[-1] + distance)
# add x-coordinates for kpoint data
npts = l["end_index"] - l["start_index"]
distance_interval = distance/npts
x_distances.append(xlabel_distances[-2])
for i in range(npts):
x_distances.append(x_distances[-1] + distance_interval)
# set up bs and dos plot
gs = GridSpec(1, 2, width_ratios=[2, 1])
fig = mplt.figure(figsize=self.fig_size)
fig.patch.set_facecolor('white')
bs_ax = mplt.subplot(gs[0])
dos_ax = mplt.subplot(gs[1])
# set basic axes limits for the plot
bs_ax.set_xlim(0, x_distances[-1])
bs_ax.set_ylim(emin, emax)
dos_ax.set_ylim(emin, emax)
# add BS xticks, labels, etc.
bs_ax.set_xticks(xlabel_distances)
bs_ax.set_xticklabels(xlabels, size=self.tick_fontsize)
bs_ax.set_xlabel('Wavevector $k$', fontsize=self.axis_fontsize, family=self.font)
bs_ax.set_ylabel('$E-E_F$ / eV', fontsize=self.axis_fontsize, family=self.font)
# add BS fermi level line at E=0 and gridlines
bs_ax.hlines(y=0, xmin=0, xmax=x_distances[-1], color="k", lw=2)
bs_ax.set_yticks(np.arange(emin, emax+1E-5, self.egrid_interval))
bs_ax.set_yticklabels(np.arange(emin, emax+1E-5, self.egrid_interval),
size=self.tick_fontsize)
dos_ax.set_yticks(np.arange(emin, emax+1E-5, self.egrid_interval))
bs_ax.set_axisbelow(True)
bs_ax.grid(color=[0.5, 0.5, 0.5], linestyle='dotted', linewidth=1)
dos_ax.set_yticklabels([])
dos_ax.grid(color=[0.5, 0.5, 0.5], linestyle='dotted', linewidth=1)
# renormalize the band energy to the Fermi level
band_energies = {}
for spin in (Spin.up, Spin.down):
if spin in bs.bands:
band_energies[spin] = []
for band in bs.bands[spin]:
band_energies[spin].append([e - bs.efermi for e in band])
# renormalize the DOS energies to Fermi level
dos_energies = [e - dos.efermi for e in dos.energies]
# get the projection data to set colors for the band structure
colordata = self._get_colordata(bs, elements, bs_projection)
# plot the colored band structure lines
for spin in (Spin.up, Spin.down):
if spin in band_energies:
linestyles = "solid" if spin == Spin.up else "dotted"
for band_idx, band in enumerate(band_energies[spin]):
self._rgbline(bs_ax, x_distances, band,
colordata[spin][band_idx, :, 0],
colordata[spin][band_idx, :, 1],
colordata[spin][band_idx, :, 2],
linestyles=linestyles)
# Plot the DOS and projected DOS
for spin in (Spin.up, Spin.down):
if spin in dos.densities:
# plot the total DOS
dos_densities = dos.densities[spin] * int(spin)
label = "total" if spin == Spin.up else None
dos_ax.plot(dos_densities, dos_energies, color=(0.6, 0.6, 0.6),
label=label)
dos_ax.fill_between(dos_densities, 0, dos_energies,
color=(0.7, 0.7, 0.7),
facecolor=(0.7, 0.7, 0.7))
# plot the atom-projected DOS
if self.dos_projection.lower() == "elements":
colors = ['b', 'r', 'g', 'm', 'y', 'c', 'k', 'w']
el_dos = dos.get_element_dos()
for idx, el in enumerate(elements):
dos_densities = el_dos[Element(el)].densities[spin] *\
int(spin)
label = el if spin == Spin.up else None
dos_ax.plot(dos_densities, dos_energies,
color=colors[idx], label=label)
elif self.dos_projection.lower() == "orbitals":
# plot each of the atomic projected DOS
colors = ['b', 'r', 'g', 'm']
spd_dos = dos.get_spd_dos()
for idx, orb in enumerate([OrbitalType.s, OrbitalType.p,
OrbitalType.d, OrbitalType.f]):
if orb in spd_dos:
dos_densities = spd_dos[orb].densities[spin] *\
int(spin)
label = orb if spin == Spin.up else None
dos_ax.plot(dos_densities, dos_energies,
color=colors[idx], label=label)
# get index of lowest and highest energy being plotted, used to help auto-scale DOS x-axis
emin_idx = next(x[0] for x in enumerate(dos_energies) if x[1] >= emin)
emax_idx = len(dos_energies) - next(x[0] for x in
enumerate(reversed(dos_energies))
if x[1] <= emax)
# determine DOS x-axis range
dos_xmin = 0 if Spin.down not in dos.densities else -max(
dos.densities[Spin.down][emin_idx:emax_idx+1] * 1.05)
        dos_xmax = max([max(dos.densities[Spin.up][emin_idx:emax_idx + 1]) *
                        1.05, abs(dos_xmin)])
# set up the DOS x-axis and add Fermi level line
dos_ax.set_xlim(dos_xmin, dos_xmax)
dos_ax.set_xticklabels([])
dos_ax.hlines(y=0, xmin=dos_xmin, xmax=dos_xmax, color="k", lw=2)
dos_ax.set_xlabel('DOS', fontsize=self.axis_fontsize, family=self.font)
# add legend for band structure
if self.bs_legend and not rgb_legend:
handles = []
if bs_projection is None:
handles = [mlines.Line2D([], [], linewidth=2,
color='k', label='spin up'),
mlines.Line2D([], [], linewidth=2,
color='b', linestyle="dotted",
label='spin down')]
elif bs_projection.lower() == "elements":
colors = ['b', 'r', 'g']
for idx, el in enumerate(elements):
handles.append(mlines.Line2D([], [],
linewidth=2,
color=colors[idx], label=el))
bs_ax.legend(handles=handles, fancybox=True,
prop={'size': self.legend_fontsize,
'family': self.font}, loc=self.bs_legend)
elif self.bs_legend and rgb_legend:
if len(elements) == 2:
self._rb_line(bs_ax, elements[1], elements[0],
loc=self.bs_legend)
elif len(elements) == 3:
self._rgb_triangle(bs_ax, elements[1], elements[2], elements[0],
loc=self.bs_legend)
# add legend for DOS
if self.dos_legend:
dos_ax.legend(fancybox=True, prop={'size': self.legend_fontsize,
'family': self.font},
loc=self.dos_legend)
mplt.subplots_adjust(wspace=0.1)
return mplt
@staticmethod
def _rgbline(ax, k, e, red, green, blue, alpha=1, linestyles="solid"):
"""
An RGB colored line for plotting.
creation of segments based on:
http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb
Args:
ax: matplotlib axis
k: x-axis data (k-points)
e: y-axis data (energies)
red: red data
green: green data
blue: blue data
alpha: alpha values data
linestyles: linestyle for plot (e.g., "solid" or "dotted")
"""
from matplotlib.collections import LineCollection
pts = np.array([k, e]).T.reshape(-1, 1, 2)
seg = np.concatenate([pts[:-1], pts[1:]], axis=1)
nseg = len(k) - 1
r = [0.5 * (red[i] + red[i + 1]) for i in range(nseg)]
g = [0.5 * (green[i] + green[i + 1]) for i in range(nseg)]
b = [0.5 * (blue[i] + blue[i + 1]) for i in range(nseg)]
        a = np.ones(nseg, float) * alpha
lc = LineCollection(seg, colors=list(zip(r, g, b, a)),
linewidth=2, linestyles=linestyles)
ax.add_collection(lc)
@staticmethod
def _get_colordata(bs, elements, bs_projection):
"""
Get color data, including projected band structures
Args:
bs: Bandstructure object
elements: elements (in desired order) for setting to blue, red, green
bs_projection: None for no projection, "elements" for element projection
        Returns:
            A dict of {spin: array}, where each array has shape
            (n_bands, n_kpoints, 3) and holds the RGB color data.
"""
contribs = {}
if bs_projection and bs_projection.lower() == "elements":
projections = bs.get_projection_on_elements()
for spin in (Spin.up, Spin.down):
if spin in bs.bands:
contribs[spin] = []
for band_idx in range(bs.nb_bands):
colors = []
for k_idx in range(len(bs.kpoints)):
if bs_projection and bs_projection.lower() == "elements":
c = [0, 0, 0]
projs = projections[spin][band_idx][k_idx]
# note: squared color interpolations are smoother
# see: https://youtu.be/LKnqECcg6Gw
projs = dict([(k, v**2) for k, v in projs.items()])
total = sum(projs.values())
if total > 0:
for idx, e in enumerate(elements):
                                    c[idx] = math.sqrt(projs[e] / total)  # sqrt undoes the squaring above
c = [c[1], c[2], c[0]] # prefer blue, then red, then green
else:
c = [0, 0, 0] if spin == Spin.up \
else [0, 0, 1] # black for spin up, blue for spin down
colors.append(c)
contribs[spin].append(colors)
contribs[spin] = np.array(contribs[spin])
return contribs
@staticmethod
def _rgb_triangle(ax, r_label, g_label, b_label, loc):
"""
Draw an RGB triangle legend on the desired axis
"""
        if loc not in range(1, 11):
loc = 2
from mpl_toolkits.axes_grid.inset_locator import inset_axes
inset_ax = inset_axes(ax, width=1, height=1, loc=loc)
mesh = 35
x = []
y = []
color = []
for r in range(0, mesh):
for g in range(0, mesh):
for b in range(0, mesh):
if not (r == 0 and b == 0 and g == 0):
r1 = r / (r + g + b)
g1 = g / (r + g + b)
b1 = b / (r + g + b)
x.append(0.33 * (2. * g1 + r1) / (r1 + b1 + g1))
y.append(0.33 * np.sqrt(3) * r1 / (r1 + b1 + g1))
rc = math.sqrt(r**2 / (r**2 + g**2 + b**2))
gc = math.sqrt(g**2 / (r**2 + g**2 + b**2))
bc = math.sqrt(b**2 / (r**2 + g**2 + b**2))
color.append([rc, gc, bc])
# x = [n + 0.25 for n in x] # nudge x coordinates
# y = [n + (max_y - 1) for n in y] # shift y coordinates to top
# plot the triangle
inset_ax.scatter(x, y, s=7, marker='.', edgecolor=color)
inset_ax.set_xlim([-0.35, 1.00])
inset_ax.set_ylim([-0.35, 1.00])
# add the labels
inset_ax.text(0.70, -0.2, g_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='left')
inset_ax.text(0.325, 0.70, r_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='center')
inset_ax.text(-0.05, -0.2, b_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='right')
inset_ax.get_xaxis().set_visible(False)
inset_ax.get_yaxis().set_visible(False)
@staticmethod
def _rb_line(ax, r_label, b_label, loc):
# Draw an rb bar legend on the desired axis
        if loc not in range(1, 11):
loc = 2
from mpl_toolkits.axes_grid.inset_locator import inset_axes
inset_ax = inset_axes(ax, width=1.2, height=0.4, loc=loc)
x = []
y = []
color = []
for i in range(0, 1000):
x.append(i / 1800. + 0.55)
y.append(0)
color.append([math.sqrt(c) for c in
[1 - (i/1000)**2, 0, (i / 1000)**2]])
# plot the bar
inset_ax.scatter(x, y, s=250., marker='s', edgecolor=color)
inset_ax.set_xlim([-0.1, 1.7])
inset_ax.text(1.35, 0, b_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment="left", verticalalignment="center")
inset_ax.text(0.30, 0, r_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment="right", verticalalignment="center")
inset_ax.get_xaxis().set_visible(False)
inset_ax.get_yaxis().set_visible(False)
class BoltztrapPlotter(object):
"""
    Class containing methods to plot data from BoltzTraP.
Args:
bz: a BoltztrapAnalyzer object
"""
def __init__(self, bz):
self._bz = bz
def _plot_doping(self, temp):
import matplotlib.pyplot as plt
if len(self._bz.doping) != 0:
            limit = 2.21e15  # y coordinate used to place the doping labels
plt.axvline(self._bz.mu_doping['n'][temp][0], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['n'][temp][0] + 0.01,
limit,
"$n$=10$^{" + str(
math.log10(self._bz.doping['n'][0])) + "}$",
color='b')
plt.axvline(self._bz.mu_doping['n'][temp][-1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['n'][temp][-1] + 0.01,
limit,
"$n$=10$^{" + str(math.log10(self._bz.doping['n'][-1]))
+ "}$", color='b')
plt.axvline(self._bz.mu_doping['p'][temp][0], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['p'][temp][0] + 0.01,
limit,
"$p$=10$^{" + str(
math.log10(self._bz.doping['p'][0])) + "}$",
color='b')
plt.axvline(self._bz.mu_doping['p'][temp][-1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['p'][temp][-1] + 0.01,
limit, "$p$=10$^{" +
str(math.log10(self._bz.doping['p'][-1])) + "}$",
color='b')
def _plot_bg_limits(self):
import matplotlib.pyplot as plt
plt.axvline(0.0, color='k', linewidth=3.0)
plt.axvline(self._bz.gap, color='k', linewidth=3.0)
def plot_seebeck_mu(self, temp=600, output='eig', xlim=None):
"""
        Plot the Seebeck coefficient as a function of Fermi level
        Args:
            temp:
                the temperature
            xlim:
                a list of min and max Fermi energies; by default (0, band gap)
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[
temp]
plt.plot(self._bz.mu_steps, seebeck,
linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim[0], xlim[1])
plt.ylabel("Seebeck \n coefficient ($\\mu$V/K)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_conductivity_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
"""
        Plot the conductivity as a function of Fermi level. Semi-log plot
        Args:
            temp: the temperature
            xlim: a list of min and max Fermi energies; by default (0, band
                gap)
            relaxation_time: A relaxation time in s. By default 1e-14 and the
                plot is in units of relaxation time
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
cond = self._bz.get_conductivity(relaxation_time=relaxation_time,
output=output, doping_levels=False)[
temp]
plt.semilogy(self._bz.mu_steps, cond, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['$\\Sigma_1$', '$\\Sigma_2$', '$\\Sigma_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylim([1e13 * relaxation_time, 1e20 * relaxation_time])
plt.ylabel("conductivity,\n $\\Sigma$ (1/($\\Omega$ m))", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_power_factor_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
"""
        Plot the power factor as a function of Fermi level. Semi-log plot
        Args:
            temp: the temperature
            xlim: a list of min and max Fermi energies; by default (0, band
                gap)
            relaxation_time: A relaxation time in s. By default 1e-14 and the
                plot is in units of relaxation time
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output=output, doping_levels=False)[
temp]
plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("Power factor, ($\\mu$W/(mK$^2$))", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_zt_mu(self, temp=600, output='eig', relaxation_time=1e-14,
xlim=None):
"""
        Plot the ZT as a function of Fermi level.
        Args:
            temp: the temperature
            xlim: a list of min and max Fermi energies; by default (0, band
                gap)
            relaxation_time: A relaxation time in s. By default 1e-14 and the
                plot is in units of relaxation time
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
zt = self._bz.get_zt(relaxation_time=relaxation_time, output=output,
doping_levels=False)[temp]
plt.plot(self._bz.mu_steps, zt, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['ZT$_1$', 'ZT$_2$', 'ZT$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("ZT", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_dos(self, sigma=0.05):
"""
plot dos
Args:
sigma: a smearing
Returns:
a matplotlib object
"""
plotter = DosPlotter(sigma=sigma)
plotter.add_dos("t", self._bz.dos)
return plotter.get_plot()
def plot_carriers(self, temp=300):
"""
        Plot the carrier concentration as a function of Fermi level
Args:
temp: the temperature
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.semilogy(self._bz.mu_steps,
abs(self._bz.carrier_conc[temp] / (self._bz.vol * 1e-24)),
linewidth=3.0, color='r')
self._plot_bg_limits()
self._plot_doping(temp)
plt.xlim(-0.5, self._bz.gap + 0.5)
plt.ylim(1e14, 1e22)
plt.ylabel("carrier concentration (cm-3)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_hall_carriers(self, temp=300):
"""
        Plot the Hall carrier concentration as a function of Fermi level
Args:
temp: the temperature
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
hall_carriers = [abs(i) for i in
self._bz.get_hall_carrier_concentration()[temp]]
plt.semilogy(self._bz.mu_steps,
hall_carriers,
linewidth=3.0, color='r')
self._plot_bg_limits()
self._plot_doping(temp)
plt.xlim(-0.5, self._bz.gap + 0.5)
plt.ylim(1e14, 1e22)
plt.ylabel("Hall carrier concentration (cm-3)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
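# Illustrative usage sketch (hypothetical object names): the plotting methods
# above are called on a plotter instance that wraps a BoltztrapAnalyzer
# (stored as self._bz), e.g.
#
#     plt = bz_plotter.plot_zt_mu(temp=600, relaxation_time=1e-14)
#     plt.show()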
def plot_fermi_surface(data, structure, cbm, energy_levels=[], multiple_figure=True,
mlab_figure = None, kpoints_dict={}, color=(0,0,1),
transparency_factor=[], labels_scale_factor=0.05,
points_scale_factor=0.02, interative=True):
"""
Plot the Fermi surface at specific energy value.
Args:
data: energy values in a 3D grid from a CUBE file
via read_cube_file function, or from a
BoltztrapAnalyzer.fermi_surface_data
structure: structure object of the material
        energy_levels: list of energy values of the fermi surface.
                       By default a single level near the band edge is chosen
                       and printed to stdout.
cbm: Boolean value to specify if the considered band is
a conduction band or not
multiple_figure: if True a figure for each energy level will be shown.
If False all the surfaces will be shown in the same figure.
            In this last case, tune the transparency factor.
mlab_figure: provide a previous figure to plot a new surface on it.
kpoints_dict: dictionary of kpoints to show in the plot.
example: {"K":[0.5,0.0,0.5]},
where the coords are fractional.
        color: tuple (r, g, b) of floats in [0, 1] defining the surface color.
transparency_factor: list of values in the range [0,1] to tune
the opacity of the surfaces.
labels_scale_factor: factor to tune the size of the kpoint labels
points_scale_factor: factor to tune the size of the kpoint points
interative: if True an interactive figure will be shown.
                    If False a non-interactive figure will be shown, but
it is possible to plot other surfaces on the same figure.
To make it interactive, run mlab.show().
Returns:
a Mayavi figure and a mlab module to control the plot.
Note: Experimental.
Please, double check the surface shown by using some
other software and report issues.
"""
try:
from mayavi import mlab
except ImportError:
raise BoltztrapError(
"Mayavi package should be installed to use this function")
bz = structure.lattice.reciprocal_lattice.get_wigner_seitz_cell()
cell = structure.lattice.reciprocal_lattice.matrix
fact = 1 if cbm == False else -1
en_min = np.min(fact*data.ravel())
en_max = np.max(fact*data.ravel())
if energy_levels == []:
energy_levels = [en_min + 0.01] if cbm == True else \
[en_max - 0.01]
print("Energy level set to: " + str(energy_levels[0])+" eV")
else:
for e in energy_levels:
if e > en_max or e < en_min:
raise BoltztrapError("energy level " + str(e) +
" not in the range of possible energies: [" +
str(en_min) + ", " + str(en_max) + "]")
if transparency_factor == []:
transparency_factor = [1]*len(energy_levels)
if mlab_figure:
        fig = mlab_figure
    if mlab_figure is None and not multiple_figure:
fig = mlab.figure(size = (1024,768),bgcolor = (1,1,1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]),color=(0,0,0),
tube_radius=None, figure = fig)
for label,coords in kpoints_dict.iteritems():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor, color=(0,0,0), figure = fig)
mlab.text3d(*label_coords, text=label, scale=labels_scale_factor, color=(0,0,0), figure = fig)
for isolevel,alpha in zip(energy_levels,transparency_factor):
if multiple_figure:
fig = mlab.figure(size = (1024,768),bgcolor = (1,1,1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]),color=(0,0,0),
tube_radius=None, figure = fig)
for label,coords in kpoints_dict.iteritems():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor, color=(0,0,0), figure = fig)
mlab.text3d(*label_coords, text=label, scale=labels_scale_factor, color=(0,0,0), figure = fig)
cp = mlab.contour3d(fact*data,contours=[isolevel], transparent=True,
colormap='hot', color=color, opacity=alpha, figure = fig)
polydata = cp.actor.actors[0].mapper.input
pts = np.array(polydata.points) #- 1
polydata.points = np.dot(pts,
cell / np.array(data.shape)[:, np.newaxis])
cx,cy,cz = [np.mean(np.array(polydata.points)[:, i])
for i in range(3)]
polydata.points = (np.array(polydata.points) - [cx,cy,cz]) * 2
mlab.view(distance='auto')
if interative == True:
mlab.show()
return fig, mlab
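# Illustrative usage sketch (hypothetical names): `an` is assumed to be a
# BoltztrapAnalyzer with fermi_surface_data and `structure` the corresponding
# pymatgen Structure, as described in the docstring above.
#
#     fig, mlab = plot_fermi_surface(an.fermi_surface_data, structure,
#                                    cbm=False,
#                                    kpoints_dict={"Gamma": [0.0, 0.0, 0.0]},
#                                    interative=False)
#     mlab.show()  # make the non-interactive figure interactive when ready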
def plot_wigner_seitz(lattice, ax=None, **kwargs):
"""
Adds the skeleton of the Wigner-Seitz cell of the lattice to a matplotlib Axes
Args:
lattice: Lattice object
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to black
and linewidth to 1.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "k"
if "linewidth" not in kwargs:
kwargs["linewidth"] = 1
bz = lattice.get_wigner_seitz_cell()
ax, fig, plt = get_ax3d_fig_plt(ax)
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x) for x in bz[jface])\
and any(np.all(line[1] == x) for x in bz[jface]):
ax.plot(*zip(line[0], line[1]), **kwargs)
return fig, ax
def plot_lattice_vectors(lattice, ax=None, **kwargs):
"""
Adds the basis vectors of the lattice provided to a matplotlib Axes
Args:
lattice: Lattice object
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to green
and linewidth to 3.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "g"
if "linewidth" not in kwargs:
kwargs["linewidth"] = 3
vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0])
vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
return fig, ax
def plot_path(line, lattice=None, coords_are_cartesian=False, ax=None, **kwargs):
"""
Adds a line passing through the coordinates listed in 'line' to a matplotlib Axes
Args:
line: list of coordinates.
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Requires lattice if False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to red
and linewidth to 3.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "r"
if "linewidth" not in kwargs:
kwargs["linewidth"] = 3
for k in range(1, len(line)):
vertex1 = line[k-1]
vertex2 = line[k]
if not coords_are_cartesian:
if lattice is None:
raise ValueError("coords_are_cartesian False requires the lattice")
vertex1 = lattice.get_cartesian_coords(vertex1)
vertex2 = lattice.get_cartesian_coords(vertex2)
ax.plot(*zip(vertex1, vertex2), **kwargs)
return fig, ax
def plot_labels(labels, lattice=None, coords_are_cartesian=False, ax=None, **kwargs):
"""
Adds labels to a matplotlib Axes
Args:
labels: dict containing the label as a key and the coordinates as value.
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
        coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Requires lattice if False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'text'. Color defaults to blue
and size to 25.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "b"
if "size" not in kwargs:
kwargs["size"] = 25
for k, coords in labels.items():
label = k
if k.startswith("\\") or k.find("_") != -1:
label = "$" + k + "$"
off = 0.01
if coords_are_cartesian:
coords = np.array(coords)
else:
if lattice is None:
raise ValueError("coords_are_cartesian False requires the lattice")
coords = lattice.get_cartesian_coords(coords)
ax.text(*(coords + off), s=label, **kwargs)
return fig, ax
def fold_point(p, lattice, coords_are_cartesian=False):
"""
Folds a point with coordinates p inside the first Brillouin zone of the lattice.
Args:
p: coordinates of one point
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Returns:
The cartesian coordinates folded inside the first Brillouin zone
"""
if coords_are_cartesian:
p = lattice.get_fractional_coords(p)
else:
p = np.array(p)
p = np.mod(p+0.5-1e-10, 1)-0.5+1e-10
p = lattice.get_cartesian_coords(p)
closest_lattice_point = None
smallest_distance = 10000
for i in (-1, 0, 1):
for j in (-1, 0, 1):
for k in (-1, 0, 1):
lattice_point = np.dot((i, j, k), lattice.matrix)
dist = np.linalg.norm(p - lattice_point)
if closest_lattice_point is None or dist < smallest_distance:
closest_lattice_point = lattice_point
smallest_distance = dist
if not np.allclose(closest_lattice_point, (0, 0, 0)):
p = p - closest_lattice_point
return p
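# Illustrative sketch (hypothetical names): `rec_latt` is assumed to be a
# reciprocal Lattice object.
#
#     cart_coords = fold_point([0.7, 0.0, 0.0], rec_latt)
#
# The point is given in fractional coordinates here and the folded result is
# returned in cartesian coordinates, as documented above.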
def plot_points(points, lattice=None, coords_are_cartesian=False, fold=False, ax=None, **kwargs):
"""
Adds Points to a matplotlib Axes
Args:
points: list of coordinates
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Requires lattice if False.
fold: whether the points should be folded inside the first Brillouin Zone.
Defaults to False. Requires lattice if True.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'scatter'. Color defaults to blue
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "b"
if (not coords_are_cartesian or fold) and lattice is None:
raise ValueError("coords_are_cartesian False or fold True require the lattice")
for p in points:
if fold:
p = fold_point(p, lattice, coords_are_cartesian=coords_are_cartesian)
elif not coords_are_cartesian:
p = lattice.get_cartesian_coords(p)
ax.scatter(*p, **kwargs)
return fig, ax
@add_fig_kwargs
def plot_brillouin_zone_from_kpath(kpath, ax=None, **kwargs):
"""
Gives the plot (as a matplotlib object) of the symmetry line path in
the Brillouin Zone.
Args:
kpath (HighSymmKpath): a HighSymmKPath object
ax: matplotlib :class:`Axes` or None if a new figure should be created.
**kwargs: provided by add_fig_kwargs decorator
Returns:
matplotlib figure
"""
lines = [[kpath.kpath['kpoints'][k] for k in p]
for p in kpath.kpath['path']]
return plot_brillouin_zone(bz_lattice=kpath.prim_rec, lines=lines, ax=ax,
labels=kpath.kpath['kpoints'], **kwargs)
@add_fig_kwargs
def plot_brillouin_zone(bz_lattice, lines=None, labels=None, kpoints=None,
fold=False, coords_are_cartesian=False,
ax=None, **kwargs):
"""
Plots a 3D representation of the Brillouin zone of the structure.
Can add to the plot paths, labels and kpoints
Args:
bz_lattice: Lattice object of the Brillouin zone
lines: list of lists of coordinates. Each list represent a different path
labels: dict containing the label as a key and the coordinates as value.
kpoints: list of coordinates
fold: whether the points should be folded inside the first Brillouin Zone.
Defaults to False. Requires lattice if True.
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: provided by add_fig_kwargs decorator
Returns:
matplotlib figure
"""
fig, ax = plot_lattice_vectors(bz_lattice, ax=ax)
plot_wigner_seitz(bz_lattice, ax=ax)
if lines is not None:
for line in lines:
plot_path(line, bz_lattice,
coords_are_cartesian=coords_are_cartesian, ax=ax)
if labels is not None:
plot_labels(labels, bz_lattice,
coords_are_cartesian=coords_are_cartesian, ax=ax)
plot_points(labels.values(), bz_lattice,
coords_are_cartesian=coords_are_cartesian,
fold=False, ax=ax)
if kpoints is not None:
plot_points(kpoints, bz_lattice,
coords_are_cartesian=coords_are_cartesian,
ax=ax, fold=fold)
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
ax.set_aspect('equal')
ax.axis("off")
return fig
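# Illustrative sketch (hypothetical names): drawing a Brillouin zone with one
# labelled high-symmetry point and one extra k-point.
#
#     fig = plot_brillouin_zone(struct.lattice.reciprocal_lattice,
#                               labels={"\\Gamma": [0.0, 0.0, 0.0]},
#                               kpoints=[[0.5, 0.5, 0.5]])
#
# plot_brillouin_zone_from_kpath(kpath) above wraps the same call with the
# lines and labels taken from a HighSymmKpath object.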
def plot_ellipsoid(hessian, center, lattice=None, rescale=1.0, ax=None, coords_are_cartesian=False, **kwargs):
"""
    Plots a 3D ellipsoid representing the Hessian matrix given as input.
Useful to get a graphical visualization of the effective mass
of a band in a single k-point.
Args:
hessian: the Hessian matrix
center: the center of the ellipsoid in reciprocal coords (Default)
lattice: Lattice object of the Brillouin zone
rescale: factor for size scaling of the ellipsoid
ax: matplotlib :class:`Axes` or None if a new figure should be created.
coords_are_cartesian: Set to True if you are providing a center in
cartesian coordinates. Defaults to False.
kwargs: kwargs passed to the matplotlib function 'plot_wireframe'.
Color defaults to blue, rstride and cstride
default to 4, alpha defaults to 0.2.
Returns:
matplotlib figure and matplotlib ax
Example of use:
fig,ax=plot_wigner_seitz(struct.reciprocal_lattice)
plot_ellipsoid(hessian,[0.0,0.0,0.0], struct.reciprocal_lattice,ax=ax)
"""
if (not coords_are_cartesian) and lattice is None:
raise ValueError("coords_are_cartesian False or fold True require the lattice")
if not coords_are_cartesian:
center = lattice.get_cartesian_coords(center)
if "color" not in kwargs:
kwargs["color"] = "b"
if "rstride" not in kwargs:
kwargs["rstride"] = 4
if "cstride" not in kwargs:
kwargs["cstride"] = 4
if "alpha" not in kwargs:
kwargs["alpha"] = 0.2
# calculate the ellipsoid
# find the rotation matrix and radii of the axes
U, s, rotation = np.linalg.svd(hessian)
radii = 1.0/np.sqrt(s)
# from polar coordinates
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
[x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]], rotation)*rescale + center
# add the ellipsoid to the current axes
ax, fig, plt = get_ax3d_fig_plt(ax)
ax.plot_wireframe(x, y, z, **kwargs)
return fig, ax
| mit |
shyamalschandra/scikit-learn | sklearn/neighbors/regression.py | 32 | 11019 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
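        # With uniform weights the prediction is the plain mean of the
        # neighbors' targets; otherwise it is a per-output weighted average
        # using the (e.g. inverse-distance) weights computed above.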
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
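        # Note: a query point with no neighbors inside `radius` yields an
        # empty index array, so the unweighted mean below produces NaN for
        # that row.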
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
bmroach/Reverb-Simulator | Working-Directory/Reverb/mainReverb.py | 2 | 2406 | """
Filename: mainReverb.py
See README.md
Developed under the Apache License 2.0
"""
#______________________________________________________________________________
#Header Imports
import array
import contextlib
import wave
import matplotlib.pyplot as plt
import numpy as np
import math
import copy
import sys
sys.path.append('../Utilities')
import utilities as ut
#______________________________________________________________________________
#Start mainReverb.py
# Global parameters
dirIn = "../../Original-Audio-Samples/"
dirOut = "../../Output-Audio-Samples/Reverb/"
numChannels = 1 # mono
sampleWidth = 2 # in bytes, a 16-bit short
sampleRate = 44100
#______________________________________________________________________________
def reverb(signal, preDelay = 0, Decay = .25, trim = True):
"""fileName: name of file in string form
preDelay: delay before reverb begins (seconds)
Decay: hang time of signal (seconds)
"""
signal = [int(x) for x in signal]
pdSamples = int(preDelay * sampleRate)
dSamples = int(Decay * sampleRate)
if trim: #trim to 10 seconds
signal = signal[:441000]
lengthIn = len(signal)
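    # Build an exponential decay envelope e^(-n/dSamples); each delayed copy
    # of a sample is scaled by this kernel to form the reverb tail.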
logArray = [(math.e**(-1*(x/dSamples))) for x in range(dSamples)]
avg = ut.signalAvg(signal)[0]
goalAmp = avg * 1.2
outputSignal = [0 for x in range(len(signal) + pdSamples + dSamples)]
length = len(outputSignal)
for i in range(lengthIn): #for all input samples
currentSample = signal[i]
outputSignal[i] = currentSample
for x in range(1,dSamples): #for all reverb samples
index = i + x + pdSamples
outputSignal[index] += (currentSample * logArray[x])
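    # Normalize: repeatedly attenuate the mixed signal by 10% until its
    # average amplitude drops below goalAmp (1.2x the input's average).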
while ut.signalAvg(outputSignal)[0] > goalAmp:
outputSignal = [x*.90 for x in outputSignal]
outputSignal = [int(x) for x in outputSignal]
return outputSignal
def reverbDemo():
    # jfk = ut.readWaveFile(dirIn + "jfk.wav")
    # jfkReverb = reverb(jfk)
    # ut.writeWaveFile(dirOut + "JFK_Reverb.wav", jfkReverb)
piano = ut.readWaveFile(dirIn+"piano.wav")
pianoReverb = reverb(piano)
ut.writeWaveFile(dirOut + "Piano_Reverb.wav", pianoReverb)
print("Reverb Demo Complete.")
reverbDemo()
| apache-2.0 |
IndraVikas/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
slinderman/pyhsmm_spiketrains | experiments/fit_hipp_data.py | 1 | 12257 | """
Fit a sequence of models to the rat hippocampal recordings
"""
import os
import time
import gzip
import cPickle
import numpy as np
from scipy.io import loadmat
from collections import namedtuple
from pybasicbayes.util.text import progprint_xrange
import matplotlib.pyplot as plt
import brewer2mpl
allcolors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
import pyhsmm_spiketrains.models
reload(pyhsmm_spiketrains.models)
from pyhsmm_spiketrains.internals.utils import \
log_expected_pll, split_train_test
# Set the seed
# seed = np.random.randint(0, 2**16)
seed = 0
print "setting seed to ", seed
np.random.seed(seed)
def load_hipp_data(dataname="hipp_2dtrack_b", trainfrac=0.8):
raw_data = loadmat("data/%s.mat" % dataname)
S = raw_data['S'].astype(np.int).copy("C")
# Get the time stamps
T,N = S.shape
dt = 0.25
ts = np.arange(T) * dt
# Get the corresponding position
pos = raw_data['pos']
S_train, pos_train, S_test, pos_test = split_train_test(S, pos, trainfrac=trainfrac)
if "cp" in raw_data and "r" in raw_data:
center = raw_data['cp'].ravel()
radius = np.float(raw_data['r'])
else:
center = radius = None
return N, S_train, pos_train, S_test, pos_test, center, radius
Results = namedtuple(
"Results", ["name", "loglikes", "predictive_lls",
"N_used", "alphas", "gammas",
"rates", "obs_hypers",
"samples", "timestamps"])
def fit(name, model, test_data, N_iter=1000, init_state_seq=None):
def evaluate(model):
ll = model.log_likelihood()
pll = model.log_likelihood(test_data)
N_used = len(model.used_states)
trans = model.trans_distn
alpha = trans.alpha
gamma = trans.gamma if hasattr(trans, "gamma") else None
rates = model.rates.copy()
obs_hypers = model.obs_hypers
# print 'N_states: {}, \tPLL:{}\n'.format(len(model.used_states), pll),
return ll, pll, N_used, alpha, gamma, rates, obs_hypers
def sample(model):
tic = time.time()
model.resample_model()
timestep = time.time() - tic
return evaluate(model), timestep
# Initialize with given state seq
if init_state_seq is not None:
model.states_list[0].stateseq = init_state_seq
for _ in xrange(100):
model.resample_obs_distns()
init_val = evaluate(model)
vals, timesteps = zip(*[sample(model) for _ in progprint_xrange(N_iter)])
lls, plls, N_used, alphas, gammas, rates, obs_hypers = \
zip(*((init_val,) + vals))
timestamps = np.cumsum((0.,) + timesteps)
return Results(name, lls, plls, N_used, alphas, gammas,
rates, obs_hypers,
model.copy_sample(), timestamps)
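# Illustrative sketch (hypothetical names): a Results tuple returned by fit()
# can be inspected directly, e.g.
#
#     res = fit("HDP-HMM (Scale)", hmm, S_test, N_iter=100)
#     print res.name, res.N_used[-1], res.predictive_lls[-1]
#
# run_experiment() below shows the full pipeline, including pickling results.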
def fit_vb(name, model, test_data, N_iter=1000, init_state_seq=None):
def evaluate(model):
ll = model.log_likelihood()
pll = model.log_likelihood(test_data)
N_used = len(model.used_states)
trans = model.trans_distn
alpha = trans.alpha
gamma = trans.gamma if hasattr(trans, "gamma") else None
rates = model.rates.copy()
obs_hypers = model.obs_hypers
# print 'N_states: {}, \tPLL:{}\n'.format(len(model.used_states), pll),
return ll, pll, N_used, alpha, gamma, rates, obs_hypers
def sample(model):
tic = time.time()
model.meanfield_coordinate_descent_step()
timestep = time.time() - tic
# Resample from mean field posterior
model._resample_from_mf()
return evaluate(model), timestep
# Initialize with given state seq
if init_state_seq is not None:
model.states_list[0].stateseq = init_state_seq
for _ in xrange(100):
model.resample_obs_distns()
init_val = evaluate(model)
vals, timesteps = zip(*[sample(model) for _ in progprint_xrange(200)])
lls, plls, N_used, alphas, gammas, rates, obs_hypers = \
zip(*((init_val,) + vals))
timestamps = np.cumsum((0.,) + timesteps)
return Results(name, lls, plls, N_used, alphas, gammas,
rates, obs_hypers,
model.copy_sample(), timestamps)
def make_hmm_models(N, S_train, Ks=np.arange(5,25, step=5), **kwargs):
# Define a sequence of models
names_list = []
fnames_list = []
hmm_list = []
color_list = []
for K in Ks:
names_list.append("HMM (K=%d)" % K)
fnames_list.append("hmm_K%d" % K)
color_list.append(allcolors[0])
hmm = \
pyhsmm_spiketrains.models.PoissonHMM(
N=N, K=K, alpha_a_0=5.0, alpha_b_0=1.0,
init_state_concentration=1.0,
**kwargs)
hmm.add_data(S_train)
hmm_list.append(hmm)
return names_list, fnames_list, color_list, hmm_list
def make_hdphmm_models(N, S_train, K_max=100, alpha_obs=1.0, beta_obs=1.0):
# Define a sequence of models
names_list = []
fnames_list = []
hmm_list = []
color_list = []
method_list = []
# Standard HDP-HMM (Scale resampling)
names_list.append("HDP-HMM (Scale)")
fnames_list.append("hdphmm_scale")
color_list.append(allcolors[1])
hmm = \
pyhsmm_spiketrains.models.PoissonHDPHMM(
N=N, K_max=K_max,
alpha_a_0=5.0, alpha_b_0=1.0,
gamma_a_0=5.0, gamma_b_0=1.0,
init_state_concentration=1.0,
alpha_obs=alpha_obs,
beta_obs=beta_obs)
hmm.add_data(S_train)
hmm_list.append(hmm)
method_list.append(fit)
# Standard HDP-HSMM (Scale resampling)
names_list.append("HDP-HSMM (Scale)")
fnames_list.append("hdphsmm_scale")
color_list.append(allcolors[1])
hmm = \
pyhsmm_spiketrains.models.PoissonIntNegBinHDPHSMM(
N=N, K_max=K_max,
alpha_a_0=5.0, alpha_b_0=1.0,
gamma_a_0=5.0, gamma_b_0=1.0,
init_state_concentration=1.0,
alpha_obs=alpha_obs,
beta_obs=beta_obs)
hmm.add_data(S_train)
hmm_list.append(hmm)
method_list.append(fit)
# Vary the hyperparameters of the scale resampling model
for alpha_a_0 in [1.0, 5.0, 10.0, 100.0]:
names_list.append("HDP-HMM (Scale)")
fnames_list.append("hdphmm_scale_alpha_a_0%.1f" % alpha_a_0)
color_list.append(allcolors[1])
hmm = \
pyhsmm_spiketrains.models.PoissonHDPHMM(
N=N, K_max=K_max,
alpha_a_0=alpha_a_0, alpha_b_0=1.0,
gamma_a_0=5.0, gamma_b_0=1.0,
init_state_concentration=1.0,
alpha_obs=alpha_obs,
beta_obs=beta_obs)
hmm.add_data(S_train)
hmm_list.append(hmm)
method_list.append(fit)
for gamma_a_0 in [1.0, 5.0, 10.0, 100.0]:
names_list.append("HDP-HMM (Scale)")
fnames_list.append("hdphmm_scale_gamma_a_0%.1f" % gamma_a_0)
color_list.append(allcolors[1])
hmm = \
pyhsmm_spiketrains.models.PoissonHDPHMM(
N=N, K_max=K_max,
alpha_a_0=5.0, alpha_b_0=1.0,
gamma_a_0=gamma_a_0, gamma_b_0=1.0,
init_state_concentration=1.0,
alpha_obs=alpha_obs,
beta_obs=beta_obs)
hmm.add_data(S_train)
hmm_list.append(hmm)
method_list.append(fit)
#
# for new_alpha_obs in [0.1, 0.5, 1.0, 2.0, 2.5, 5.0, 10.0]:
# names_list.append("HDP-HMM (Scale) (alpha_obs=%.1f)" % new_alpha_obs)
# fnames_list.append("hdphmm_scale_alpha_obs%.1f" % new_alpha_obs)
# color_list.append(allcolors[1])
# hmm = \
# pyhsmm_spiketrains.models.PoissonHDPHMM(
# N=N, K_max=K_max,
# alpha_a_0=5.0, alpha_b_0=1.0,
# gamma_a_0=5.0, gamma_b_0=1.0,
# init_state_concentration=1.0,
# alpha_obs=new_alpha_obs,
# beta_obs=beta_obs)
# hmm.add_data(S_train)
# hmm_list.append(hmm)
# HDP-HMM with HMC for hyperparameters
names_list.append("HDP-HMM (HMC)")
fnames_list.append("hdphmm_hmc")
color_list.append(allcolors[1])
hmm = \
pyhsmm_spiketrains.models.PoissonHDPHMM(
N=N, K_max=K_max,
alpha_a_0=5.0, alpha_b_0=1.0,
gamma_a_0=5.0, gamma_b_0=1.0,
init_state_concentration=1.0,
alpha_obs=alpha_obs,
beta_obs=beta_obs)
hmm.add_data(S_train)
hmm._resample_obs_method = "resample_obs_hypers_hmc"
hmm_list.append(hmm)
method_list.append(fit)
# HDP-HMM with hypers set by empirical bayes
names_list.append("HDP-HMM (EB)")
fnames_list.append("hdphmm_eb")
color_list.append(allcolors[1])
hmm = \
pyhsmm_spiketrains.models.PoissonHDPHMM(
N=N, K_max=K_max,
alpha_a_0=5.0, alpha_b_0=1.0,
gamma_a_0=5.0, gamma_b_0=1.0,
init_state_concentration=1.0,
alpha_obs=alpha_obs,
beta_obs=beta_obs)
hmm.add_data(S_train)
hmm.init_obs_hypers_via_empirical_bayes()
hmm._resample_obs_method = "resample_obs_hypers_null"
hmm_list.append(hmm)
method_list.append(fit)
names_list.append("HDP-HMM (VB)")
fnames_list.append("hdphmm_vb")
color_list.append(allcolors[1])
hmm = \
pyhsmm_spiketrains.models.PoissonDATruncHDPHMM(
N=N, K_max=K_max,
alpha=12.0,
gamma=12.0,
init_state_concentration=1.0,
alpha_obs=alpha_obs,
beta_obs=beta_obs)
hmm.add_data(S_train, stateseq=np.random.randint(50, size=(S_train.shape[0],)))
hmm.init_obs_hypers_via_empirical_bayes()
hmm_list.append(hmm)
method_list.append(fit_vb)
return names_list, fnames_list, color_list, hmm_list, method_list
def run_experiment():
# Set output parameters
dataname = "hipp_2dtrack_a"
runnum = 1
output_dir = os.path.join("results", dataname, "run%03d" % runnum)
assert os.path.exists(output_dir)
# Load the data
N, S_train, pos_train, S_test, pos_test, center, radius = \
load_hipp_data(dataname)
print "Running Experiment"
print "Dataset:\t", dataname
print "N:\t\t", N
print "T_train:\t", S_train.shape[0]
print "T_test:\t", S_test.shape[0]
# Fit the baseline model
static_model = pyhsmm_spiketrains.models.PoissonStatic(N)
static_model.add_data(S_train)
static_model.max_likelihood()
static_ll = static_model.log_likelihood(S_test)
# Define a set of HMMs
names_list = []
fnames_list = []
color_list = []
model_list = []
method_list = []
# Add parametric HMMs
# nl, fnl, cl, ml = \
# make_hmm_models(N, S_train, Ks=np.arange(5,90,step=10),
# alpha_obs=1.0, beta_obs=1.0)
# names_list.extend(nl)
# fnames_list.extend(fnl)
# color_list.extend(cl)
# model_list.extend(ml)
# Add HDP_HMMs
nl, fnl, cl, ml, mthdl = \
make_hdphmm_models(N, S_train, K_max=100,
alpha_obs=1.0, beta_obs=1.0)
names_list.extend(nl)
fnames_list.extend(fnl)
color_list.extend(cl)
model_list.extend(ml)
method_list.extend(mthdl)
# Fit the models with Gibbs sampling
N_iter = 5000
for model_name, model_fname, model, method in \
zip(names_list, fnames_list, model_list, method_list):
print "Model: ", model_name
print "File: ", model_fname
print ""
output_file = os.path.join(output_dir, model_fname + ".pkl.gz")
# Check for existing results
if os.path.exists(output_file):
# print "Loading results from: ", output_file
# with gzip.open(output_file, "r") as f:
# res = cPickle.load(f)
print "Results already exist at: ", output_file
else:
res = method(model_name, model, S_test, N_iter=N_iter)
# Save results
with gzip.open(output_file, "w") as f:
print "Saving results to: ", output_file
cPickle.dump(res, f, protocol=-1)
if __name__ == "__main__":
run_experiment()
| mit |
nextgenusfs/amptk | amptk/info.py | 2 | 2029 | #!/usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import sys
import os
import pandas as pd
from amptk import amptklib
from amptk.__version__ import __version__
def getVersion():
git_version = amptklib.git_version()
if git_version:
version = __version__+'-'+git_version
else:
version = __version__
return version
def main():
parentdir = os.path.join(os.path.dirname(amptklib.__file__))
db_list = []
okay_list = []
search_path = os.path.join(parentdir, 'DB')
for file in os.listdir(search_path):
file_data = []
if file.endswith(".udb"):
if file.startswith('.'):
continue
okay_list.append(file)
info_file = file + '.txt'
file_data.append(file.rstrip('.udb'))
with open(os.path.join(search_path, info_file), 'r') as info:
for line in info:
line = line.strip()
if '\t' in line:
file_data += line.split('\t')
else:
file_data += line.split(' ')
db_list.append(file_data)
if len(db_list) < 1:
db_print = "No DB configured, run 'amptk install' or 'amptk database' command."
else:
df = pd.DataFrame(db_list)
df.columns = ['DB_name', 'DB_type', 'FASTA', 'Fwd Primer',
'Rev Primer', 'Records', 'Source', 'Version', 'Date']
dfsort = df.sort_values(by='DB_name')
db_print = dfsort.to_string(index=False, justify='center')
print('------------------------------')
print('Running AMPtk v {:}'.format(getVersion()))
print('------------------------------')
print('Taxonomy Databases Installed: {:}'.format(os.path.join(
parentdir, 'DB')))
print('------------------------------')
print(db_print)
print('------------------------------')
if __name__ == "__main__":
main()
| bsd-2-clause |
stephenliu1989/msmbuilder | msmbuilder/tests/test_agglomerative.py | 1 | 1925 | import numpy as np
from mdtraj.testing import eq
from sklearn.base import clone
from sklearn.metrics import adjusted_rand_score
from msmbuilder.cluster import LandmarkAgglomerative
random = np.random.RandomState(2)
def test_1():
x = [random.randn(10, 2), random.randn(10, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters)
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
n_landmarks=sum(len(s) for s in x))
labels0 = clone(model1).fit(x).predict(x)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert len(labels0) == 2
assert len(labels1) == 2
assert len(labels2) == 2
eq(labels0[0], labels1[0])
eq(labels0[1], labels1[1])
eq(labels0[0], labels2[0])
eq(labels0[1], labels2[1])
assert len(np.unique(np.concatenate(labels0))) == n_clusters
def test_2():
# this should be a really easy clustering problem
x = [random.randn(20, 2) + 10, random.randn(20, 2)]
n_clusters = 2
model1 = LandmarkAgglomerative(n_clusters=n_clusters)
model2 = LandmarkAgglomerative(n_clusters=n_clusters,
landmark_strategy='random',
random_state=random, n_landmarks=20)
labels1 = model1.fit_predict(x)
labels2 = model2.fit_predict(x)
assert adjusted_rand_score(np.concatenate(labels1),
np.concatenate(labels2)) == 1.0
def test_callable_metric():
def my_euc(target, ref, i):
return np.sqrt(np.sum((target - ref[i]) ** 2, axis=1))
model1 = LandmarkAgglomerative(n_clusters=10, n_landmarks=20,
metric='euclidean')
model2 = LandmarkAgglomerative(n_clusters=10, n_landmarks=20, metric=my_euc)
data = np.random.RandomState(0).randn(100, 2)
eq(model1.fit_predict([data])[0], model2.fit_predict([data])[0])
| lgpl-2.1 |
abhisg/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/tri/trirefine.py | 20 | 14567 | """
Mesh refinement for triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
import matplotlib.tri.triinterpolate
class TriRefiner(object):
"""
Abstract base class for classes implementing mesh refinement.
A TriRefiner encapsulates a Triangulation object and provides tools for
mesh refinement and interpolation.
Derived classes must implements:
- ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
the optional keyword arguments *kwargs* are defined in each
TriRefiner concrete implementation, and which returns :
- a refined triangulation
- optionally (depending on *return_tri_index*), for each
point of the refined triangulation: the index of
the initial triangulation triangle to which it belongs.
- ``refine_field(z, triinterpolator=None, **kwargs)`` , where:
- *z* array of field values (to refine) defined at the base
triangulation nodes
- *triinterpolator* is a
:class:`~matplotlib.tri.TriInterpolator` (optional)
- the other optional keyword arguments *kwargs* are defined in
each TriRefiner concrete implementation
and which returns (as a tuple) a refined triangular mesh and the
interpolated values of the field at the refined triangulation nodes.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
class UniformTriRefiner(TriRefiner):
"""
Uniform mesh refinement by recursive subdivisions.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation`
The encapsulated triangulation (to be refined)
"""
# See Also
# --------
# :class:`~matplotlib.tri.CubicTriInterpolator` and
# :class:`~matplotlib.tri.TriAnalyzer`.
# """
def __init__(self, triangulation):
TriRefiner.__init__(self, triangulation)
def refine_triangulation(self, return_tri_index=False, subdiv=3):
"""
        Computes a uniformly refined triangulation *refi_triangulation* of
the encapsulated :attr:`triangulation`.
This function refines the encapsulated triangulation by splitting each
father triangle into 4 child sub-triangles built on the edges midside
nodes, recursively (level of recursion *subdiv*).
In the end, each triangle is hence divided into ``4**subdiv``
child triangles.
The default value for *subdiv* is 3 resulting in 64 refined
subtriangles for each triangle of the initial triangulation.
Parameters
----------
return_tri_index : boolean, optional
Boolean indicating whether an index table indicating the father
triangle index of each point will be returned. Default value
False.
subdiv : integer, optional
Recursion level for the subdivision. Defaults value 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_triangulation : :class:`~matplotlib.tri.Triangulation`
The returned refined triangulation
found_index : array-like of integers
            Index of the containing triangle in the initial triangulation,
            for each point of *refi_triangulation*.
Returned only if *return_tri_index* is set to True.
"""
refi_triangulation = self._triangulation
ntri = refi_triangulation.triangles.shape[0]
# Computes the triangulation ancestors numbers in the reference
# triangulation.
ancestors = np.arange(ntri, dtype=np.int32)
for _ in range(subdiv):
refi_triangulation, ancestors = self._refine_triangulation_once(
refi_triangulation, ancestors)
refi_npts = refi_triangulation.x.shape[0]
refi_triangles = refi_triangulation.triangles
# Now we compute found_index table if needed
if return_tri_index:
# We have to initialize found_index with -1 because some nodes
# may very well belong to no triangle at all, e.g., in case of
# Delaunay Triangulation with DuplicatePointWarning.
found_index = - np.ones(refi_npts, dtype=np.int32)
tri_mask = self._triangulation.mask
if tri_mask is None:
found_index[refi_triangles] = np.repeat(ancestors,
3).reshape(-1, 3)
else:
# There is a subtlety here: we want to avoid whenever possible
                # that a refined point's containing triangle is masked (which
# would result in artifacts in plots).
# So we impose the numbering from masked ancestors first,
# then overwrite it with unmasked ancestor numbers.
ancestor_mask = tri_mask[ancestors]
found_index[refi_triangles[ancestor_mask, :]
] = np.repeat(ancestors[ancestor_mask],
3).reshape(-1, 3)
found_index[refi_triangles[~ancestor_mask, :]
] = np.repeat(ancestors[~ancestor_mask],
3).reshape(-1, 3)
return refi_triangulation, found_index
else:
return refi_triangulation
def refine_field(self, z, triinterpolator=None, subdiv=3):
"""
Refines a field defined on the encapsulated triangulation.
Returns *refi_tri* (refined triangulation), *refi_z* (interpolated
values of the field at the node of the refined triangulation).
Parameters
----------
z : 1d-array-like of length ``n_points``
Values of the field to refine, defined at the nodes of the
encapsulated triangulation. (``n_points`` is the number of points
in the initial triangulation)
triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional
Interpolator used for field interpolation. If not specified,
a :class:`~matplotlib.tri.CubicTriInterpolator` will
be used.
subdiv : integer, optional
Recursion level for the subdivision. Defaults to 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_tri : :class:`~matplotlib.tri.Triangulation` object
The returned refined triangulation
refi_z : 1d array of length: *refi_tri* node count.
The returned interpolated field (at *refi_tri* nodes)
Examples
--------
The main application of this method is to plot high-quality
iso-contours on a coarse triangular grid (e.g., triangulation built
from relatively sparse test data):
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py
"""
if triinterpolator is None:
interp = matplotlib.tri.CubicTriInterpolator(
self._triangulation, z)
else:
if not isinstance(triinterpolator,
matplotlib.tri.TriInterpolator):
raise ValueError("Expected a TriInterpolator object")
interp = triinterpolator
refi_tri, found_index = self.refine_triangulation(
subdiv=subdiv, return_tri_index=True)
refi_z = interp._interpolate_multikeys(
refi_tri.x, refi_tri.y, tri_index=found_index)[0]
return refi_tri, refi_z
@staticmethod
def _refine_triangulation_once(triangulation, ancestors=None):
"""
This function refines a matplotlib.tri *triangulation* by splitting
each triangle into 4 child-masked_triangles built on the edges midside
nodes.
        The masked triangles, if present, are also split, but their children
        are returned masked.
If *ancestors* is not provided, returns only a new triangulation:
child_triangulation.
If the array-like key table *ancestor* is given, it shall be of shape
(ntri,) where ntri is the number of *triangulation* masked_triangles.
In this case, the function returns
(child_triangulation, child_ancestors)
child_ancestors is defined so that the 4 child masked_triangles share
the same index as their father: child_ancestors.shape = (4 * ntri,).
"""
x = triangulation.x
y = triangulation.y
# According to tri.triangulation doc:
# neighbors[i,j] is the triangle that is the neighbor
# to the edge from point index masked_triangles[i,j] to point
# index masked_triangles[i,(j+1)%3].
neighbors = triangulation.neighbors
triangles = triangulation.triangles
npts = np.shape(x)[0]
ntri = np.shape(triangles)[0]
if ancestors is not None:
ancestors = np.asarray(ancestors)
if np.shape(ancestors) != (ntri,):
raise ValueError(
"Incompatible shapes provide for triangulation"
".masked_triangles and ancestors: {0} and {1}".format(
np.shape(triangles), np.shape(ancestors)))
# Initiating tables refi_x and refi_y of the refined triangulation
# points
# hint: each apex is shared by 2 masked_triangles except the borders.
borders = np.sum(neighbors == -1)
added_pts = (3*ntri + borders) // 2
refi_npts = npts + added_pts
refi_x = np.zeros(refi_npts)
refi_y = np.zeros(refi_npts)
# First part of refi_x, refi_y is just the initial points
refi_x[:npts] = x
refi_y[:npts] = y
# Second part contains the edge midside nodes.
# Each edge belongs to 1 triangle (if border edge) or is shared by 2
# masked_triangles (interior edge).
# We first build 2 * ntri arrays of edge starting nodes (edge_elems,
# edge_apexes) ; we then extract only the masters to avoid overlaps.
# The so-called 'master' is the triangle with biggest index
# The 'slave' is the triangle with lower index
# (can be -1 if border edge)
# For slave and master we will identify the apex pointing to the edge
# start
edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32)]))
edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32)*2]))
edge_neighbors = neighbors[edge_elems, edge_apexes]
mask_masters = (edge_elems > edge_neighbors)
# Identifying the "masters" and adding to refi_x, refi_y vec
masters = edge_elems[mask_masters]
apex_masters = edge_apexes[mask_masters]
x_add = (x[triangles[masters, apex_masters]] +
x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
y_add = (y[triangles[masters, apex_masters]] +
y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
refi_x[npts:] = x_add
refi_y[npts:] = y_add
# Building the new masked_triangles ; each old masked_triangles hosts
# 4 new masked_triangles
# there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
# 3 new_pt_midside
new_pt_corner = triangles
# What is the index in refi_x, refi_y of point at middle of apex iapex
# of elem ielem ?
# If ielem is the apex master: simple count, given the way refi_x was
# built.
# If ielem is the apex slave: yet we do not know ; but we will soon
# using the neighbors table.
new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
cum_sum = npts
for imid in range(3):
mask_st_loc = (imid == apex_masters)
n_masters_loc = np.sum(mask_st_loc)
elem_masters_loc = masters[mask_st_loc]
new_pt_midside[:, imid][elem_masters_loc] = np.arange(
n_masters_loc, dtype=np.int32) + cum_sum
cum_sum += n_masters_loc
# Now dealing with slave elems.
# for each slave element we identify the master and then the inode
        # once slave_masters is identified, slave_masters_apex is such that:
# neighbors[slaves_masters, slave_masters_apex] == slaves
mask_slaves = np.logical_not(mask_masters)
slaves = edge_elems[mask_slaves]
slaves_masters = edge_neighbors[mask_slaves]
diff_table = np.abs(neighbors[slaves_masters, :] -
np.outer(slaves, np.ones(3, dtype=np.int32)))
slave_masters_apex = np.argmin(diff_table, axis=1)
slaves_apex = edge_apexes[mask_slaves]
new_pt_midside[slaves, slaves_apex] = new_pt_midside[
slaves_masters, slave_masters_apex]
# Builds the 4 child masked_triangles
child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
child_triangles[0::4, :] = np.vstack([
new_pt_corner[:, 0], new_pt_midside[:, 0],
new_pt_midside[:, 2]]).T
child_triangles[1::4, :] = np.vstack([
new_pt_corner[:, 1], new_pt_midside[:, 1],
new_pt_midside[:, 0]]).T
child_triangles[2::4, :] = np.vstack([
new_pt_corner[:, 2], new_pt_midside[:, 2],
new_pt_midside[:, 1]]).T
child_triangles[3::4, :] = np.vstack([
new_pt_midside[:, 0], new_pt_midside[:, 1],
new_pt_midside[:, 2]]).T
child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
# Builds the child mask
if triangulation.mask is not None:
child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
if ancestors is None:
return child_triangulation
else:
return child_triangulation, np.repeat(ancestors, 4)
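# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the helper above is
# the kind of single-pass, 4-way subdivision driven by
# matplotlib.tri.UniformTriRefiner.  Assuming only that public API, a typical
# call looks like the following; subdiv=2 splits every triangle into
# 4**2 = 16 children, and the returned index array plays the role of the
# repeated `ancestors` above.
#
#     import numpy as np
#     import matplotlib.tri as mtri
#
#     x, y = np.random.rand(2, 30)
#     coarse = mtri.Triangulation(x, y)
#     refiner = mtri.UniformTriRefiner(coarse)
#     fine, tri_index = refiner.refine_triangulation(return_tri_index=True,
#                                                    subdiv=2)
# --------------------------------------------------------------------------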
| gpl-2.0 |
surligas/gnuradio | gr-filter/examples/reconstruction.py | 49 | 5015 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import channels
except ImportError:
print "Error: Program requires gr-channels."
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
fftlen = 8192
def main():
N = 10000
fs = 2000.0
Ts = 1.0/fs
t = scipy.arange(0, N*Ts, Ts)
# When playing with the number of channels, be careful about the filter
# specs and the channel map of the synthesizer set below.
nchans = 10
# Build the filter(s)
bw = 1000
tb = 400
proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
bw, tb, 80,
filter.firdes.WIN_BLACKMAN_hARRIS)
print "Filter length: ", len(proto_taps)
# Create a modulated signal
npwr = 0.01
data = scipy.random.randint(0, 256, N)
rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
src = blocks.vector_source_b(data.astype(scipy.uint8).tolist(), False)
mod = digital.bpsk_mod(samples_per_symbol=2)
chan = channels.channel_model(npwr)
rrc = filter.fft_filter_ccc(1, rrc_taps)
# Split it up into pieces
channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
# Put the pieces back together again
syn_taps = [nchans*t for t in proto_taps]
synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
src_snk = blocks.vector_sink_c()
snk = blocks.vector_sink_c()
# Remap the location of the channels
# Can be done in synth or channelizer (watch out for rotations in
# the channelizer)
synthesizer.set_channel_map([ 0, 1, 2, 3, 4,
15, 16, 17, 18, 19])
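# Note (added, hedged): with the 2x-rate flag passed to pfb_synthesizer_ccf
# above, the synthesizer appears to operate on 2*nchans (here 20) input
# slots, so the 10 channelizer outputs are most likely being mapped onto the
# lower (0-4) and upper (15-19) halves of that 20-slot map to reassemble a
# contiguous spectrum; see the GNU Radio pfb_synthesizer documentation for
# the authoritative description.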
tb = gr.top_block()
tb.connect(src, mod, chan, rrc, channelizer)
tb.connect(rrc, src_snk)
vsnk = []
for i in xrange(nchans):
tb.connect((channelizer,i), (synthesizer, i))
vsnk.append(blocks.vector_sink_c())
tb.connect((channelizer,i), vsnk[i])
tb.connect(synthesizer, snk)
tb.run()
sin = scipy.array(src_snk.data()[1000:])
sout = scipy.array(snk.data()[1000:])
# Plot original signal
fs_in = nchans*fs
f1 = pylab.figure(1, figsize=(16,12), facecolor='w')
s11 = f1.add_subplot(2,2,1)
s11.psd(sin, NFFT=fftlen, Fs=fs_in)
s11.set_title("PSD of Original Signal")
s11.set_ylim([-200, -20])
s12 = f1.add_subplot(2,2,2)
s12.plot(sin.real[1000:1500], "o-b")
s12.plot(sin.imag[1000:1500], "o-r")
s12.set_title("Original Signal in Time")
start = 1
skip = 2
s13 = f1.add_subplot(2,2,3)
s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
s13.set_title("Constellation")
s13.set_xlim([-2, 2])
s13.set_ylim([-2, 2])
# Plot channels
nrows = int(scipy.sqrt(nchans))
ncols = int(scipy.ceil(float(nchans)/float(nrows)))
f2 = pylab.figure(2, figsize=(16,12), facecolor='w')
for n in xrange(nchans):
s = f2.add_subplot(nrows, ncols, n+1)
s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
s.set_title("Channel {0}".format(n))
s.set_ylim([-200, -20])
# Plot reconstructed signal
fs_out = 2*nchans*fs
f3 = pylab.figure(3, figsize=(16,12), facecolor='w')
s31 = f3.add_subplot(2,2,1)
s31.psd(sout, NFFT=fftlen, Fs=fs_out)
s31.set_title("PSD of Reconstructed Signal")
s31.set_ylim([-200, -20])
s32 = f3.add_subplot(2,2,2)
s32.plot(sout.real[1000:1500], "o-b")
s32.plot(sout.imag[1000:1500], "o-r")
s32.set_title("Reconstructed Signal in Time")
start = 0
skip = 4
s33 = f3.add_subplot(2,2,3)
s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
s33.set_title("Constellation")
s33.set_xlim([-2, 2])
s33.set_ylim([-2, 2])
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
cspang1/4534-08 | src/supervisory/test_protocol/test_serial_com.py | 1 | 1683 | import serial
import sys
import time
import matplotlib.pyplot as plt
import numpy
def update_line(h1, x, y):
h1.set_xdata(numpy.append(h1.get_xdata(), x))
h1.set_ydata(numpy.append(h1.get_ydata(), y))
plt.draw()
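# Hedged usage sketch for update_line() (not in the original script): with
# matplotlib interactive mode enabled, each call appends one (x, y) sample
# and redraws, which is how the commented-out plotting lines below would be
# used.
#
#     plt.ion()
#     h1, = plt.plot([], [])
#     update_line(h1, 0, 42)
#     update_line(h1, 1, 57)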
'''
__author__ = 'tjd08a'
'''
port = None
for arg in sys.argv:
port = arg
ser = serial.Serial(port, baudrate=57600, timeout=10)
readNum = False
counter = 0
seconds = 0
# h1, = plt.plot([], [])
# Reboot sequence below
ser.write('$$$')
time.sleep(1)
ser.write('reboot\r')
time.sleep(3)
start = None
stop = None
initial = True
#plt.show(h1)
while 1:
#ser.write("hello world")
bytesWaiting = ser.inWaiting()
if bytesWaiting:
# print bytesWaiting
letter = ser.read(1)
val = ord(letter)
if not readNum:
if val >= 128 :
#print "Ready To Receive"
ser.write("r")
readNum = True
if initial:
start = time.time()
initial = False
else:
end = time.time()
# print "Received %i" % val
if (end - start >= 1):
seconds += 1
print "%d: Total Messages Received - %d" % (seconds, counter)
start = time.time()
if (val > 100):
if(val == 255):
#print "Stop byte received"
ser.write('e')
readNum = False
else:
print "Error: Incorrect value received"
print val
#print val
#update_line(h1, counter, val)
counter += 1
ser.flush() | gpl-3.0 |
maurov/xraysloth | sloth/math/gridxyz.py | 1 | 2757 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities to work with 2D grids and interpolation
=================================================
"""
from __future__ import division, print_function
import numpy as np
from sloth.utils.logging import getLogger
_logger = getLogger('gridxyz')
### GLOBAL VARIABLES ###
MODNAME = '_math'
def gridxyz(xcol, ycol, zcol, xystep=None, lib='scipy', method='cubic'):
"""Grid (X, Y, Z) 1D data on a 2D regular mesh
Parameters
----------
xcol, ycol, zcol : 1D arrays representing the map (z is the intensity)
xystep : the step size of the XY grid
lib : library used for griddata
[scipy]
matplotlib
method : interpolation method
Returns
-------
xgrid, ygrid : 1D arrays giving abscissa and ordinate of the map
zz : 2D array with the gridded intensity map
See also
--------
- MultipleScanToMeshPlugin in PyMca
"""
if xystep is None:
xystep = 0.1
_logger.warning("'xystep' not given: using a default value of {0}".format(xystep))
#create the XY meshgrid and interpolate the Z on the grid
nxpoints = int((xcol.max()-xcol.min())/xystep)
nypoints = int((ycol.max()-ycol.min())/xystep)
xgrid = np.linspace(xcol.min(), xcol.max(), num=nxpoints)
ygrid = np.linspace(ycol.min(), ycol.max(), num=nypoints)
xx, yy = np.meshgrid(xgrid, ygrid)
if ('matplotlib' in lib.lower()):
try:
from matplotlib.mlab import griddata
except ImportError:
_logger.error("Cannot load griddata from Matplotlib")
return
if not (method == 'nn' or method == 'nearest'):
_logger.warning("Interpolation method {0} not supported by {1}".format(method, lib))
_logger.info("Gridding data with {0}...".format(lib))
zz = griddata(xcol, ycol, zcol, xx, yy)
return xgrid, ygrid, zz
elif ('scipy' in lib.lower()):
try:
from scipy.interpolate import griddata
except ImportError:
_logger.error("Cannot load griddata from Scipy")
return
_logger.info("Gridding data with {0}...".format(lib))
zz = griddata((xcol, ycol), zcol, (xgrid[None,:], ygrid[:,None]), method=method, fill_value=0)
return xgrid, ygrid, zz
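# Hedged usage sketch (illustrative only, not part of the original module):
# grid 500 scattered samples of a smooth surface onto a 0.2-step mesh.
#
#     import numpy as np
#     xcol = np.random.uniform(0, 10, 500)
#     ycol = np.random.uniform(0, 10, 500)
#     zcol = np.sin(xcol) * np.cos(ycol)
#     xgrid, ygrid, zz = gridxyz(xcol, ycol, zcol, xystep=0.2, lib='scipy',
#                                method='cubic')
#     # zz has shape (len(ygrid), len(xgrid)) and can be passed to
#     # matplotlib's contourf/imshow with xgrid/ygrid as the axes.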
### LARCH ###
def gridxyz_larch(xcol, ycol, zcol, xystep=None, method='cubic', lib='scipy', _larch=None):
"""Larch equivalent of gridxyz() """
if _larch is None:
raise Warning("Larch broken?")
return gridxyz(xcol, ycol, zcol, xystep=xystep, method=method, lib=lib)
gridxyz_larch.__doc__ += gridxyz.__doc__
def registerLarchPlugin():
return (MODNAME, {'gridxyz': gridxyz_larch})
if __name__ == '__main__':
pass
| bsd-3-clause |
aprotopopov/lifetimes | lifetimes/datasets/__init__.py | 1 | 1813 | # -*- coding: utf-8 -*-
# modified from https://github.com/CamDavidsonPilon/lifelines/
import pandas as pd
from .. import utils
from pkg_resources import resource_filename
__all__ = [
'load_cdnow_summary',
'load_transaction_data',
'load_cdnow_summary_data_with_monetary_value',
'load_donations'
]
def load_dataset(filename, **kwargs):
"""
Load a dataset from lifetimes.datasets.
Parameters:
filename: for example "larynx.csv"
usecols: list of columns in file to use
Returns:
Pandas dataframe
"""
return pd.read_csv(resource_filename('lifetimes', 'datasets/' + filename), **kwargs)
def load_donations(**kwargs):
"""Load donations dataset as pandas DataFrame."""
return load_dataset('donations.csv', **kwargs)
def load_cdnow_summary(**kwargs):
"""Load cdnow customers summary pandas DataFrame."""
return load_dataset('cdnow_customers_summary.csv', **kwargs)
def load_transaction_data(**kwargs):
"""
Return a Pandas dataframe of transactional data.
Looks like:
                      date  id
    0  2014-03-08 00:00:00   0
    1  2014-05-21 00:00:00   1
    2  2014-03-14 00:00:00   2
    3  2014-04-09 00:00:00   2
    4  2014-05-21 00:00:00   2
The data was artificially created using Lifetimes data generation routines. Data was generated
between 2014-01-01 to 2014-12-31.
"""
return load_dataset('example_transactions.csv', **kwargs)
def load_cdnow_summary_data_with_monetary_value(**kwargs):
"""Load cdnow customers summary with monetary value as pandas DataFrame."""
df = load_dataset('cdnow_customers_summary_with_transactions.csv', **kwargs)
df.columns = ['customer_id', 'frequency', 'recency', 'T', 'monetary_value']
df = df.set_index('customer_id')
return df
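# Hedged usage sketch (not part of the original module).  Only the loaders
# above are defined here; the fitter import assumes lifetimes.BetaGeoFitter,
# the package's standard BG/NBD model.
#
#     from lifetimes.datasets import load_cdnow_summary
#     from lifetimes import BetaGeoFitter
#
#     summary = load_cdnow_summary(index_col=[0])
#     bgf = BetaGeoFitter(penalizer_coef=0.0)
#     bgf.fit(summary['frequency'], summary['recency'], summary['T'])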
| mit |
jonhadfield/acli | lib/acli/output/cloudwatch.py | 1 | 6718 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, print_function, unicode_literals)
def output_ec2_cpu(dates=None, values=None,
instance_id=None, output_type=None):
"""
@type dates: list
@type values: list
@type instance_id: unicode
@type output_type: unicode
"""
try:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
exit('matplotlib required to output graphs.')
if output_type in ('graph', None):
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
xfmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.plot(dates, values)
plt.gcf().autofmt_xdate()
plt.title('CPU statistics for: {0}'.format(instance_id))
plt.xlabel('Time (UTC)')
plt.ylabel('CPU %')
plt.grid(True)
plt.ylim([0, 100])
plt.show()
exit(0)
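# Hedged usage sketch (not part of the original module): feed synthetic
# five-minute CPU datapoints to output_ec2_cpu; in acli itself the dates and
# values would come from CloudWatch datapoints for the instance, and the
# instance id below is only a placeholder.
#
#     from datetime import datetime, timedelta
#     start = datetime(2020, 1, 1)
#     dates = [start + timedelta(minutes=5 * i) for i in range(12)]
#     values = [5, 7, 12, 30, 55, 60, 58, 40, 22, 10, 8, 6]
#     output_ec2_cpu(dates=dates, values=values,
#                    instance_id='i-0123456789abcdef0', output_type='graph')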
def output_ec2_mem(dates=None, values=None, instance_id=None, output_type=None):
"""
@type dates: list
@type values: list
@type instance_id: unicode
@type output_type: unicode
"""
try:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
exit('matplotlib required to output graphs.')
if output_type in ('graph', None):
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
xfmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.plot(dates, values)
plt.gcf().autofmt_xdate()
plt.title('Memory usage for: {0}'.format(instance_id))
plt.xlabel('Time (UTC)')
plt.ylabel('Memory Usage %')
plt.grid(True)
plt.show()
exit(0)
def output_ec2_net(in_dates=None, in_values=None, out_dates=None,
out_values=None, instance_id=None, output_type=None):
"""
@type in_dates: list
@type in_values: list
@type out_dates: list
@type out_values: list
@type instance_id: unicode
@type output_type: unicode
"""
try:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
exit('matplotlib required to output graphs.')
if output_type in ('graph', None):
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
xfmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
in_line = plt.plot(in_dates, in_values)
out_line = plt.plot(out_dates, out_values)
plt.setp(in_line, color='g', linewidth=2.0, label='inbound')
plt.setp(out_line, color='b', linewidth=2.0, label='outbound')
plt.gcf().autofmt_xdate()
plt.title('Network statistics for: {0}'.format(instance_id))
plt.xlabel('Time (UTC)')
plt.ylabel('Network (Bytes/s)')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
plt.grid()
plt.show()
exit(0)
def output_ec2_vols(vols_datapoints=None, instance_id=None, output_type=None):
"""
@type vols_datapoints: list
@type instance_id: unicode
@type output_type: unicode
"""
try:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
exit('matplotlib required to output graphs.')
try:
import numpy as np
except ImportError:
exit('install numpy.')
if output_type in ('graph', None):
num_plots = len(vols_datapoints)
f, axarr = plt.subplots(num_plots, sharex=True, sharey=True)
f.suptitle('Volumes for instance: {0}'.format(instance_id), fontsize=16)
plt.ylabel('Bytes')
if isinstance(axarr, np.ndarray):
for index, vol_set in enumerate(vols_datapoints):
read_dates = vol_set.get('read_dates')
read_values = vol_set.get('read_values')
write_dates = vol_set.get('write_dates')
write_values = vol_set.get('write_values')
axarr[index].set_title(vol_set.get('device_name'))
axarr[index].grid(True)
axarr[index].plot(write_dates, write_values, label='write')
axarr[index].plot(read_dates, read_values, label='read')
axarr[index].legend(loc="upper right",
title=None,
fancybox=False)
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
plt.xlabel('Time (UTC)')
else:
read_dates = vols_datapoints[0].get('read_dates')
read_values = vols_datapoints[0].get('read_values')
write_dates = vols_datapoints[0].get('write_dates')
write_values = vols_datapoints[0].get('write_values')
axarr.set_title(vols_datapoints[0].get('device_name'))
axarr.plot(write_dates, write_values, label='write')
axarr.plot(read_dates, read_values, label='read')
axarr.legend(loc="upper right",
title=None,
fancybox=False)
axarr.grid(True)
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
plt.xlabel('Time (UTC)')
ax = plt.gca()
ax.set_ylim(bottom=0)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
xfmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.grid(True)
plt.show()
exit(0)
def output_asg_cpu(dates=None, values=None,
asg_name=None, output_type=None):
"""
@type dates: list
@type values: list
@type asg_name: unicode
@type output_type: unicode
"""
try:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
except ImportError:
exit('matplotlib required to output graphs.')
if output_type in ('graph', None):
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
xfmt = mdates.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.plot(dates, values)
plt.gcf().autofmt_xdate()
plt.title('CPU statistics for: {0}'.format(asg_name))
plt.xlabel('Time (UTC)')
plt.ylabel('CPU %')
plt.grid()
plt.ylim([0, 100])
plt.show()
exit(0)
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/tests/test_ticker.py | 3 | 18980 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import nose.tools
from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_almost_equal
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from matplotlib.testing.decorators import cleanup
import warnings
@cleanup(style='classic')
def test_MaxNLocator():
loc = mticker.MaxNLocator(nbins=5)
test_value = np.array([20., 40., 60., 80., 100.])
assert_almost_equal(loc.tick_values(20, 100), test_value)
test_value = np.array([0., 0.0002, 0.0004, 0.0006, 0.0008, 0.001])
assert_almost_equal(loc.tick_values(0.001, 0.0001), test_value)
test_value = np.array([-1.0e+15, -5.0e+14, 0e+00, 5e+14, 1.0e+15])
assert_almost_equal(loc.tick_values(-1e15, 1e15), test_value)
@cleanup
def test_MaxNLocator_integer():
loc = mticker.MaxNLocator(nbins=5, integer=True)
test_value = np.array([-1, 0, 1, 2])
assert_almost_equal(loc.tick_values(-0.1, 1.1), test_value)
test_value = np.array([-0.25, 0, 0.25, 0.5, 0.75, 1.0])
assert_almost_equal(loc.tick_values(-0.1, 0.95), test_value)
loc = mticker.MaxNLocator(nbins=5, integer=True, steps=[1, 1.5, 5, 6, 10])
test_value = np.array([0, 15, 30, 45, 60])
assert_almost_equal(loc.tick_values(1, 55), test_value)
def test_LinearLocator():
loc = mticker.LinearLocator(numticks=3)
test_value = np.array([-0.8, -0.3, 0.2])
assert_almost_equal(loc.tick_values(-0.8, 0.2), test_value)
def test_MultipleLocator():
loc = mticker.MultipleLocator(base=3.147)
test_value = np.array([-9.441, -6.294, -3.147, 0., 3.147, 6.294,
9.441, 12.588])
assert_almost_equal(loc.tick_values(-7, 10), test_value)
@cleanup
def test_AutoMinorLocator():
fig, ax = plt.subplots()
ax.set_xlim(0, 1.39)
ax.minorticks_on()
test_value = np.array([0.05, 0.1, 0.15, 0.25, 0.3, 0.35, 0.45,
0.5, 0.55, 0.65, 0.7, 0.75, 0.85, 0.9,
0.95, 1, 1.05, 1.1, 1.15, 1.25, 1.3, 1.35])
assert_almost_equal(ax.xaxis.get_ticklocs(minor=True), test_value)
def test_LogLocator():
loc = mticker.LogLocator(numticks=5)
assert_raises(ValueError, loc.tick_values, 0, 1000)
test_value = np.array([1.00000000e-05, 1.00000000e-03, 1.00000000e-01,
1.00000000e+01, 1.00000000e+03, 1.00000000e+05,
1.00000000e+07, 1.000000000e+09])
assert_almost_equal(loc.tick_values(0.001, 1.1e5), test_value)
loc = mticker.LogLocator(base=2)
test_value = np.array([0.5, 1., 2., 4., 8., 16., 32., 64., 128., 256.])
assert_almost_equal(loc.tick_values(1, 100), test_value)
def test_LinearLocator_set_params():
"""
Create linear locator with presets={}, numticks=2 and change it to
something else. See if the change was successful. Should not raise an exception.
"""
loc = mticker.LinearLocator(numticks=2)
loc.set_params(numticks=8, presets={(0, 1): []})
nose.tools.assert_equal(loc.numticks, 8)
nose.tools.assert_equal(loc.presets, {(0, 1): []})
def test_LogLocator_set_params():
"""
Create log locator with default values base=10.0, subs=[1.0], numdecs=4,
numticks=15, and change it to something else.
See if the change was successful.
Should not raise an exception.
"""
loc = mticker.LogLocator()
loc.set_params(numticks=7, numdecs=8, subs=[2.0], base=4)
nose.tools.assert_equal(loc.numticks, 7)
nose.tools.assert_equal(loc.numdecs, 8)
nose.tools.assert_equal(loc._base, 4)
nose.tools.assert_equal(list(loc._subs), [2.0])
def test_NullLocator_set_params():
"""
Create null locator, and attempt to call set_params() on it.
Should not raise an exception, but should issue a warning.
"""
loc = mticker.NullLocator()
with warnings.catch_warnings(record=True) as w:
loc.set_params()
nose.tools.assert_equal(len(w), 1)
def test_MultipleLocator_set_params():
"""
Create multiple locator with 0.7 base, and change it to something else.
See if the change was successful.
Should not raise an exception.
"""
mult = mticker.MultipleLocator(base=0.7)
mult.set_params(base=1.7)
nose.tools.assert_equal(mult._base, 1.7)
def test_LogitLocator_set_params():
"""
Create logit locator with default minor=False, and change it to something
else. See if the change was successful. Should not raise an exception.
"""
loc = mticker.LogitLocator() # Defaults to false.
loc.set_params(minor=True)
nose.tools.assert_true(loc.minor)
def test_FixedLocator_set_params():
"""
Create fixed locator with 5 nbins, and change it to something else.
See if the change was successful.
Should not raise an exception.
"""
fixed = mticker.FixedLocator(range(0, 24), nbins=5)
fixed.set_params(nbins=7)
nose.tools.assert_equal(fixed.nbins, 7)
def test_IndexLocator_set_params():
"""
Create index locator with base 3 and offset 4, and change it to something
else. See if the change was successful.
Should not raise an exception.
"""
index = mticker.IndexLocator(base=3, offset=4)
index.set_params(base=7, offset=7)
nose.tools.assert_equal(index._base, 7)
nose.tools.assert_equal(index.offset, 7)
def test_SymmetricalLogLocator_set_params():
"""
Create symmetrical log locator with default subs=[1.0] and numticks=15,
and change it to something else.
See if the change was successful.
Should not raise an exception.
"""
sym = mticker.SymmetricalLogLocator(base=10, linthresh=1)
sym.set_params(subs=[2.0], numticks=8)
nose.tools.assert_equal(sym._subs, [2.0])
nose.tools.assert_equal(sym.numticks, 8)
@cleanup(style='classic')
def test_ScalarFormatter_offset_value():
fig, ax = plt.subplots()
formatter = ax.get_xaxis().get_major_formatter()
def check_offset_for(left, right, offset):
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', 'Attempting to set identical',
UserWarning)
ax.set_xlim(left, right)
assert_equal(len(w), 1 if left == right else 0)
# Update ticks.
next(ax.get_xaxis().iter_ticks())
assert_equal(formatter.offset, offset)
test_data = [(123, 189, 0),
(-189, -123, 0),
(12341, 12349, 12340),
(-12349, -12341, -12340),
(99999.5, 100010.5, 100000),
(-100010.5, -99999.5, -100000),
(99990.5, 100000.5, 100000),
(-100000.5, -99990.5, -100000),
(1233999, 1234001, 1234000),
(-1234001, -1233999, -1234000),
(1, 1, 1),
(123, 123, 120),
# Test cases courtesy of @WeatherGod
(.4538, .4578, .45),
(3789.12, 3783.1, 3780),
(45124.3, 45831.75, 45000),
(0.000721, 0.0007243, 0.00072),
(12592.82, 12591.43, 12590),
(9., 12., 0),
(900., 1200., 0),
(1900., 1200., 0),
(0.99, 1.01, 1),
(9.99, 10.01, 10),
(99.99, 100.01, 100),
(5.99, 6.01, 6),
(15.99, 16.01, 16),
(-0.452, 0.492, 0),
(-0.492, 0.492, 0),
(12331.4, 12350.5, 12300),
(-12335.3, 12335.3, 0)]
for left, right, offset in test_data:
yield check_offset_for, left, right, offset
yield check_offset_for, right, left, offset
def _sub_labels(axis, subs=()):
"Test whether locator marks subs to be labeled"
fmt = axis.get_minor_formatter()
minor_tlocs = axis.get_minorticklocs()
fmt.set_locs(minor_tlocs)
coefs = minor_tlocs / 10**(np.floor(np.log10(minor_tlocs)))
label_expected = [np.round(c) in subs for c in coefs]
label_test = [fmt(x) != '' for x in minor_tlocs]
assert_equal(label_test, label_expected)
@cleanup(style='default')
def test_LogFormatter_sublabel():
# test label locator
fig, ax = plt.subplots()
ax.set_xscale('log')
ax.xaxis.set_major_locator(mticker.LogLocator(base=10, subs=[]))
ax.xaxis.set_minor_locator(mticker.LogLocator(base=10,
subs=np.arange(2, 10)))
ax.xaxis.set_major_formatter(mticker.LogFormatter(labelOnlyBase=True))
ax.xaxis.set_minor_formatter(mticker.LogFormatter(labelOnlyBase=False))
# axis range above 3 decades, only bases are labeled
ax.set_xlim(1, 1e4)
fmt = ax.xaxis.get_major_formatter()
fmt.set_locs(ax.xaxis.get_majorticklocs())
show_major_labels = [fmt(x) != '' for x in ax.xaxis.get_majorticklocs()]
assert np.all(show_major_labels)
_sub_labels(ax.xaxis, subs=[])
# For the next two, if the numdec threshold in LogFormatter.set_locs
# were 3, then the label sub would be 3 for 2-3 decades and (2,5)
# for 1-2 decades. With a threshold of 1, subs are not labeled.
# axis range at 2 to 3 decades
ax.set_xlim(1, 800)
_sub_labels(ax.xaxis, subs=[])
# axis range at 1 to 2 decades
ax.set_xlim(1, 80)
_sub_labels(ax.xaxis, subs=[])
# axis range at 0.4 to 1 decades, label subs 2, 3, 4, 6
ax.set_xlim(1, 8)
_sub_labels(ax.xaxis, subs=[2, 3, 4, 6])
# axis range at 0 to 0.4 decades, label all
ax.set_xlim(0.5, 0.9)
_sub_labels(ax.xaxis, subs=np.arange(2, 10, dtype=int))
def _logfe_helper(formatter, base, locs, i, expected_result):
vals = base**locs
labels = [formatter(x, pos) for (x, pos) in zip(vals, i)]
nose.tools.assert_equal(labels, expected_result)
def test_LogFormatterExponent():
class FakeAxis(object):
"""Allow Formatter to be called without having a "full" plot set up."""
def __init__(self, vmin=1, vmax=10):
self.vmin = vmin
self.vmax = vmax
def get_view_interval(self):
return self.vmin, self.vmax
i = np.arange(-3, 4, dtype=float)
expected_result = ['-3', '-2', '-1', '0', '1', '2', '3']
for base in [2, 5.0, 10.0, np.pi, np.e]:
formatter = mticker.LogFormatterExponent(base=base)
formatter.axis = FakeAxis(1, base**4)
yield _logfe_helper, formatter, base, i, i, expected_result
# Should be a blank string for non-integer powers if labelOnlyBase=True
formatter = mticker.LogFormatterExponent(base=10, labelOnlyBase=True)
formatter.axis = FakeAxis()
nose.tools.assert_equal(formatter(10**0.1), '')
# Otherwise, non-integer powers should be nicely formatted
locs = np.array([0.1, 0.00001, np.pi, 0.2, -0.2, -0.00001])
i = range(len(locs))
expected_result = ['0.1', '1e-05', '3.14', '0.2', '-0.2', '-1e-05']
for base in [2, 5, 10, np.pi, np.e]:
formatter = mticker.LogFormatterExponent(base, labelOnlyBase=False)
formatter.axis = FakeAxis(1, base**10)
yield _logfe_helper, formatter, base, locs, i, expected_result
expected_result = ['3', '5', '12', '42']
locs = np.array([3, 5, 12, 42], dtype='float')
for base in [2, 5.0, 10.0, np.pi, np.e]:
formatter = mticker.LogFormatterExponent(base, labelOnlyBase=False)
formatter.axis = FakeAxis(1, base**50)
yield _logfe_helper, formatter, base, locs, i, expected_result
def test_LogFormatterSciNotation():
test_cases = {
10: (
(-1, '${-10^{0}}$'),
(1e-05, '${10^{-5}}$'),
(1, '${10^{0}}$'),
(100000, '${10^{5}}$'),
(2e-05, '${2\\times10^{-5}}$'),
(2, '${2\\times10^{0}}$'),
(200000, '${2\\times10^{5}}$'),
(5e-05, '${5\\times10^{-5}}$'),
(5, '${5\\times10^{0}}$'),
(500000, '${5\\times10^{5}}$'),
),
2: (
(0.03125, '${2^{-5}}$'),
(1, '${2^{0}}$'),
(32, '${2^{5}}$'),
(0.0375, '${1.2\\times2^{-5}}$'),
(1.2, '${1.2\\times2^{0}}$'),
(38.4, '${1.2\\times2^{5}}$'),
)
}
for base in test_cases.keys():
formatter = mticker.LogFormatterSciNotation(base=base)
formatter.sublabel = set([1, 2, 5, 1.2])
for value, expected in test_cases[base]:
with matplotlib.rc_context({'text.usetex': False}):
nose.tools.assert_equal(formatter(value), expected)
def _pprint_helper(value, domain, expected):
fmt = mticker.LogFormatter()
label = fmt.pprint_val(value, domain)
nose.tools.assert_equal(label, expected)
def test_logformatter_pprint():
test_cases = (
(3.141592654e-05, 0.001, '3.142e-5'),
(0.0003141592654, 0.001, '3.142e-4'),
(0.003141592654, 0.001, '3.142e-3'),
(0.03141592654, 0.001, '3.142e-2'),
(0.3141592654, 0.001, '3.142e-1'),
(3.141592654, 0.001, '3.142'),
(31.41592654, 0.001, '3.142e1'),
(314.1592654, 0.001, '3.142e2'),
(3141.592654, 0.001, '3.142e3'),
(31415.92654, 0.001, '3.142e4'),
(314159.2654, 0.001, '3.142e5'),
(1e-05, 0.001, '1e-5'),
(0.0001, 0.001, '1e-4'),
(0.001, 0.001, '1e-3'),
(0.01, 0.001, '1e-2'),
(0.1, 0.001, '1e-1'),
(1, 0.001, '1'),
(10, 0.001, '10'),
(100, 0.001, '100'),
(1000, 0.001, '1000'),
(10000, 0.001, '1e4'),
(100000, 0.001, '1e5'),
(3.141592654e-05, 0.015, '0'),
(0.0003141592654, 0.015, '0'),
(0.003141592654, 0.015, '0.003'),
(0.03141592654, 0.015, '0.031'),
(0.3141592654, 0.015, '0.314'),
(3.141592654, 0.015, '3.142'),
(31.41592654, 0.015, '31.416'),
(314.1592654, 0.015, '314.159'),
(3141.592654, 0.015, '3141.593'),
(31415.92654, 0.015, '31415.927'),
(314159.2654, 0.015, '314159.265'),
(1e-05, 0.015, '0'),
(0.0001, 0.015, '0'),
(0.001, 0.015, '0.001'),
(0.01, 0.015, '0.01'),
(0.1, 0.015, '0.1'),
(1, 0.015, '1'),
(10, 0.015, '10'),
(100, 0.015, '100'),
(1000, 0.015, '1000'),
(10000, 0.015, '10000'),
(100000, 0.015, '100000'),
(3.141592654e-05, 0.5, '0'),
(0.0003141592654, 0.5, '0'),
(0.003141592654, 0.5, '0.003'),
(0.03141592654, 0.5, '0.031'),
(0.3141592654, 0.5, '0.314'),
(3.141592654, 0.5, '3.142'),
(31.41592654, 0.5, '31.416'),
(314.1592654, 0.5, '314.159'),
(3141.592654, 0.5, '3141.593'),
(31415.92654, 0.5, '31415.927'),
(314159.2654, 0.5, '314159.265'),
(1e-05, 0.5, '0'),
(0.0001, 0.5, '0'),
(0.001, 0.5, '0.001'),
(0.01, 0.5, '0.01'),
(0.1, 0.5, '0.1'),
(1, 0.5, '1'),
(10, 0.5, '10'),
(100, 0.5, '100'),
(1000, 0.5, '1000'),
(10000, 0.5, '10000'),
(100000, 0.5, '100000'),
(3.141592654e-05, 5, '0'),
(0.0003141592654, 5, '0'),
(0.003141592654, 5, '0'),
(0.03141592654, 5, '0.03'),
(0.3141592654, 5, '0.31'),
(3.141592654, 5, '3.14'),
(31.41592654, 5, '31.42'),
(314.1592654, 5, '314.16'),
(3141.592654, 5, '3141.59'),
(31415.92654, 5, '31415.93'),
(314159.2654, 5, '314159.27'),
(1e-05, 5, '0'),
(0.0001, 5, '0'),
(0.001, 5, '0'),
(0.01, 5, '0.01'),
(0.1, 5, '0.1'),
(1, 5, '1'),
(10, 5, '10'),
(100, 5, '100'),
(1000, 5, '1000'),
(10000, 5, '10000'),
(100000, 5, '100000'),
(3.141592654e-05, 100, '0'),
(0.0003141592654, 100, '0'),
(0.003141592654, 100, '0'),
(0.03141592654, 100, '0'),
(0.3141592654, 100, '0.3'),
(3.141592654, 100, '3.1'),
(31.41592654, 100, '31.4'),
(314.1592654, 100, '314.2'),
(3141.592654, 100, '3141.6'),
(31415.92654, 100, '31415.9'),
(314159.2654, 100, '314159.3'),
(1e-05, 100, '0'),
(0.0001, 100, '0'),
(0.001, 100, '0'),
(0.01, 100, '0'),
(0.1, 100, '0.1'),
(1, 100, '1'),
(10, 100, '10'),
(100, 100, '100'),
(1000, 100, '1000'),
(10000, 100, '10000'),
(100000, 100, '100000'),
(3.141592654e-05, 1000000.0, '3.1e-5'),
(0.0003141592654, 1000000.0, '3.1e-4'),
(0.003141592654, 1000000.0, '3.1e-3'),
(0.03141592654, 1000000.0, '3.1e-2'),
(0.3141592654, 1000000.0, '3.1e-1'),
(3.141592654, 1000000.0, '3.1'),
(31.41592654, 1000000.0, '3.1e1'),
(314.1592654, 1000000.0, '3.1e2'),
(3141.592654, 1000000.0, '3.1e3'),
(31415.92654, 1000000.0, '3.1e4'),
(314159.2654, 1000000.0, '3.1e5'),
(1e-05, 1000000.0, '1e-5'),
(0.0001, 1000000.0, '1e-4'),
(0.001, 1000000.0, '1e-3'),
(0.01, 1000000.0, '1e-2'),
(0.1, 1000000.0, '1e-1'),
(1, 1000000.0, '1'),
(10, 1000000.0, '10'),
(100, 1000000.0, '100'),
(1000, 1000000.0, '1000'),
(10000, 1000000.0, '1e4'),
(100000, 1000000.0, '1e5')
)
for value, domain, expected in test_cases:
yield _pprint_helper, value, domain, expected
def test_use_offset():
for use_offset in [True, False]:
with matplotlib.rc_context({'axes.formatter.useoffset': use_offset}):
tmp_form = mticker.ScalarFormatter()
nose.tools.assert_equal(use_offset, tmp_form.get_useOffset())
def test_formatstrformatter():
# test % style formatter
tmp_form = mticker.FormatStrFormatter('%05d')
nose.tools.assert_equal('00002', tmp_form(2))
# test str.format() style formatter
tmp_form = mticker.StrMethodFormatter('{x:05d}')
nose.tools.assert_equal('00002', tmp_form(2))
def test_EngFormatter_formatting():
"""
Create two instances of EngFormatter with default parameters, with and
without a unit string ('s' for seconds). Test the formatting in some cases,
especially the case when no SI prefix is present, for values in [1, 1000).
Should not raise exceptions.
"""
unitless = mticker.EngFormatter()
nose.tools.assert_equal(unitless(0.1), u'100 m')
nose.tools.assert_equal(unitless(1), u'1')
nose.tools.assert_equal(unitless(999.9), u'999.9')
nose.tools.assert_equal(unitless(1001), u'1.001 k')
with_unit = mticker.EngFormatter(unit=u's')
nose.tools.assert_equal(with_unit(0.1), u'100 ms')
nose.tools.assert_equal(with_unit(1), u'1 s')
nose.tools.assert_equal(with_unit(999.9), u'999.9 s')
nose.tools.assert_equal(with_unit(1001), u'1.001 ks')
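# Hedged illustration (added; not an actual test): how the EngFormatter
# exercised above is typically attached to a matplotlib axis.  Uses only the
# public mticker / plt APIs already imported in this module; tick positions
# (and hence the exact labels) depend on the active locator.
def _engformatter_axis_demo():
    fig, ax = plt.subplots()
    ax.plot([0, 2500, 5000, 7500, 10000])
    # y tick labels now render in engineering notation, e.g. '2.5 kHz'
    ax.yaxis.set_major_formatter(mticker.EngFormatter(unit='Hz'))
    return fig, ax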
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
yonglehou/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
This script runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
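# Hedged one-off usage sketch (not part of the original benchmark): time a
# single scikit-learn Lasso fit outside the main loops.  Assumes the same
# ScikitLasso alias that is imported in the __main__ block below.
#
#     X, Y, coef_ = make_regression(n_samples=2000, n_features=200,
#                                   noise=0.1, n_informative=20, coef=True)
#     bench(ScikitLasso, X[:1000], Y[:1000], X[1000:], Y[1000:], coef_)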
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |