repo_name | path | copies | size | content | license
---|---|---|---|---|---|
imcgreer/rapala | ninetyprime/detchar.py | 2 | 18782 | #!/usr/bin/env python
import os,sys
from glob import glob
import numpy as np
import fitsio
import multiprocessing
from functools import partial
from scipy.ndimage.filters import gaussian_filter
from astropy.stats import sigma_clip
from astropy.table import Table,vstack
from astropy.io import fits  # assumed missing import: fastreadout_analysis() below calls fits.open() and uses .data
from bokpipe.bokoscan import extract_overscan,fit_overscan,overscan_subtract
from bokpipe.bokproc import ampOrder
from bokpipe.bokutil import stats_region,array_clip,array_stats
import matplotlib.pyplot as plt
from matplotlib import ticker
def _data2arr(data,minVal=0,maxVal=65535,clip=True):
arr = np.ma.masked_array(data.astype(np.float32),
mask=((data<minVal)|(data>maxVal)))
return array_clip(arr,clip_iters=2,clip_sig=5.0)
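# e.g. _data2arr(hdu.read()[s],100,50000), as used for the flats below, masks
# pixels outside the valid ADU range before iteratively sigma-clipping the rest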
def _open_fits(f):
try:
return fitsio.FITS(f)
except IOError:
return fitsio.FITS(f+'.fz')
def calc_gain_rdnoise(biases,flats):
rv = []
s = stats_region('amp_corner_ccdcenter_1024')
for files in zip(biases[:-1],biases[1:],flats[:-1],flats[1:]):
ff = [_open_fits(f) for f in files]
data = np.empty(1,dtype=[('bias1','S35'),('bias2','S35'),
('flat1','S35'),('flat2','S35'),
('biasADU','f4',16),('flatADU','f4',16),
('biasRmsADU','f4',16),('flatRmsADU','f4',16),
('gain','f4',16),('rdnoise','f4',16)])
data['bias1'] = os.path.basename(files[0])
data['bias2'] = os.path.basename(files[1])
data['flat1'] = os.path.basename(files[2])
data['flat2'] = os.path.basename(files[3])
skip = False
for ext in range(1,17):
try:
bias1,bias2 = [ _data2arr(f[ext].read()[s],100,5000)
for f in ff[:2]]
flat1,flat2 = [ _data2arr(f[ext].read()[s],100,50000)
for f in ff[2:] ]
except:
print 'failed with ',ff
skip = True
break
_B1 = bias1.mean()
_B2 = bias2.mean()
_F1 = flat1.mean()
_F2 = flat2.mean()
if ext==1 and (np.abs(_F1-_F2)/_F1) > 0.05:
# a very large jump in the flat field value will throw
# this calculation off, restrict it to 5% variation
skip = True
break
try:
varF1F2 = (flat1-flat2).var()
varB1B2 = (bias1-bias2).var()
except:
# some images have wrong format...
data['gain'][0,ext-1] = -1
data['rdnoise'][0,ext-1] = -1
continue
# equations from end of sec 4.3 (pg 73) of Howell 2006
gain = ( (_F1 + _F2) - (_B1 + _B2) ) / (varF1F2 - varB1B2)
rdnoise = gain * np.sqrt(varB1B2/2)
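# worked example with hypothetical numbers: for F1~F2~20000 ADU, B1~B2~1000 ADU,
# var(F1-F2)~27200 ADU^2 and var(B1-B2)~60 ADU^2, this gives
# gain = 38000/27140 ~ 1.4 e-/ADU and rdnoise = 1.4*sqrt(60/2) ~ 7.7 e-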
data['biasADU'][0,ext-1] = _B1
data['flatADU'][0,ext-1] = _F1
data['biasRmsADU'][0,ext-1] = bias1.std()
data['flatRmsADU'][0,ext-1] = flat1.std()
data['gain'][0,ext-1] = gain
data['rdnoise'][0,ext-1] = rdnoise
if not skip:
rv.append(data)
return np.concatenate(rv)
def fastreadout_analysis():
datadir = get_BASS_datadir()
getims = lambda f1,f2: [os.path.join(datadir,'20150205',
'd7058.%04d.fits.gz'%f)
for f in range(f1,f2+1)]
#
bias_std_fns = getims(1,10)
bias_oscan_fns = getims(11,20)
bias_fast_fns = getims(21,30)
flat_fastlo_fns = getims(31,40)
flat_fasthi_fns = getims(41,50)
flat_stdhi_fns = getims(51,60)
flat_stdlo_fns = getims(61,62)
x1,x2 = 300,-300
y1,y2 = 300,-300
#
det = {}
for readmode in ['standard','fast']:
det[readmode] = {
'gain':np.zeros((16,9)),
'readnoise':np.zeros((16,9)),
}
if readmode == 'standard':
biases = bias_std_fns
flats = flat_stdhi_fns
else:
biases = bias_fast_fns
flats = flat_fasthi_fns
j = 0
for b1fn,b2fn,f1fn,f2fn in zip(biases[:-1],biases[1:],
flats[:-1],flats[1:]):
bias1fits = fits.open(b1fn)
bias2fits = fits.open(b2fn)
flat1fits = fits.open(f1fn)
flat2fits = fits.open(f2fn)
for ext in range(1,17):
bias1 = sigma_clip(bias1fits[ext].data[y1:y2,x1:x2])
bias2 = sigma_clip(bias2fits[ext].data[y1:y2,x1:x2])
flat1 = sigma_clip(flat1fits[ext].data[y1:y2,x1:x2])
flat2 = sigma_clip(flat2fits[ext].data[y1:y2,x1:x2])
_B1 = bias1.mean()
_B2 = bias2.mean()
_F1 = flat1.mean()
_F2 = flat2.mean()
varF1F2 = (flat1-flat2).var()
varB1B2 = (bias1-bias2).var()
# equations from end of sec 4.3 (pg 73) of Howell 2006
gain = ( (_F1 + _F2) - (_B1 + _B2) ) / (varF1F2 - varB1B2)
rdnoise = gain * np.sqrt(varB1B2/2)
det[readmode]['gain'][ext-1,j] = gain
det[readmode]['readnoise'][ext-1,j] = rdnoise
print readmode,j,ext,gain,rdnoise,_F1
j += 1
return det
def dump_fastmode_analysis(det):
print ' '*10,'%6s %6s %6s ' % ('','gain',''),
print '%6s %6s %6s ' % ('','rdnoise','')
print ' '*10,'%6s %6s %6s ' % ('std','fast','ratio'),
print '%6s %6s %6s' % ('std','fast','ratio')
for i in range(16):
print 'chip #%2d ' % (i+1),
for p in ['gain','readnoise']:
v1 = sigma_clip(det['standard'][p][i])
v2 = sigma_clip(det['fast'][p][i])
print '%6.2f %6.2f %6.3f ' % \
(v1.mean(),v2.mean(),v2.mean()/v1.mean()),
print
def plot_fastmode_analysis(det):
for p in ['gain','readnoise']:
plt.figure(figsize=(7.5,9.5))
plt.subplots_adjust(0.03,0.03,0.98,0.98)
for i in range(16):
plt.subplot(4,4,i+1)
for mode in ['standard','fast']:
v = det[mode][p][i]
eta = {'gain':0.01,'readnoise':0.05}[p]
bins = np.arange(v.min()-eta,v.max()+2*eta,eta)
plt.hist(v,bins,histtype='step')
def find_cal_sequences(log,min_len=5):
# frameIndex isn't actually a straight table index, but rather a unique
# id. adding a running index makes the group sorting much clearer.
t = Table(log).group_by('utDir')
t['ii'] = np.arange(len(t))
calseqs = {'zero':[],'flat':[],'zero_and_flat':[]}
for ut in t.groups:
iscal = np.where((ut['imType']=='zero')|(ut['imType']=='flat'))[0]
if len(iscal)==0:
continue
# this wouldn't work if someone changed the filter in the middle
# of a bias sequence... not worth worrying about
ut_type = ut[iscal].group_by(['imType','filter','expTime'])
for utt in ut_type.groups:
if len(utt) < min_len:
continue
imType = utt['imType'][0]
ii = np.arange(len(utt))
seqs = np.split(ii,1+np.where(np.diff(utt['frameIndex'])>1)[0])
seqs = [ np.array(utt['ii'][s])
for s in seqs if len(s) >= min_len ]
calseqs[imType].extend(seqs)
# bias/flat sequences taken in succession, for gain/RN calculation
# kind of hacky, just look for a set of flats taken roughly close to
# each set of biases (as in, within 20 minutes)
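# e.g. (hypothetical MJDs) a zero sequence starting at 57300.100 pairs with a
# flat sequence starting at 57300.110 (~14 min later), but not with one
# starting at 57300.150 (~72 min later)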
max_deltat_minutes = 20.
mjdk = 'mjd' if 'mjd' in t.colnames else 'mjdStart'
bias_times = np.array([ t[mjdk][s[0]] for s in calseqs['zero'] ])
flat_times = np.array([ t[mjdk][s[0]] for s in calseqs['flat'] ])
if len(flat_times) > 0:
for bt,bs in zip(bias_times,calseqs['zero']):
j = np.argmin(np.abs(bt-flat_times))
if 24*60*np.abs(bt-flat_times[j]) < max_deltat_minutes:
calseqs['zero_and_flat'].append((bs,calseqs['flat'][j]))
return calseqs
def bias_checks(bias,overscan=False):
i = 0
rv = np.zeros(1,dtype=[('fileName','S35'),
('sliceMeanAdu','f4',(16,)),
('sliceRmsAdu','f4',(16,)),
('sliceRangeAdu','f4',(16,)),
('dropFlag','i4',(16,)),
('residualMeanAdu','f4',(16,)),
('residualRmsAdu','f4',(16,))])
fn = os.path.basename(bias).replace('.fz','').replace('.fits','')
print 'checking ',fn
rv['fileName'][i] = fn
fits = _open_fits(bias)
if len(fits[1:]) != 16:
print 'ERROR: %s has %d img extensions' % (fn,len(fits[1:]))
return rv
for j,hdu in enumerate(fits[1:]):
imNum = 'IM%d' % ampOrder[j]
try:
data = hdu.read().astype(np.float32)
hdr = hdu.read_header()
except:
print 'ERROR: failed to read %s[%d]'%(fn,j+1)
continue
if overscan:
_data,oscan_cols,oscan_rows = extract_overscan(data,hdr)
#colbias = fit_overscan(oscan_cols,**kwargs)
if oscan_rows is not None:
rowbias = fit_overscan(oscan_rows,along='rows',
method='cubic_spline')
cslice = sigma_clip(data[-22:-2:,2:-22],
iters=1,sigma=3.0,axis=0)
else:
cslice = None # No row overscan to check
bottomslice = data[5:10,-16:-2].mean(axis=0)
middleslice = data[100:110,-16:-2].mean(axis=0)
else:
cslice = sigma_clip(data[1032:1048,2:-22],iters=1,sigma=3.0,axis=0)
bottomslice = data[5:10,1000:1014].mean(axis=0)
middleslice = data[100:110,1000:1014].mean(axis=0)
if cslice is not None:
cslice = cslice.mean(axis=0)
cslice = gaussian_filter(cslice,17)
rv['sliceMeanAdu'][i,j] = cslice.mean()
rv['sliceRmsAdu'][i,j] = cslice.std()
rv['sliceRangeAdu'][i,j] = cslice.max() - cslice.min()
if np.median(middleslice-bottomslice) > 15:
print 'found drop in ',bias,j
rv['dropFlag'][i,j] = 1
bias_residual = overscan_subtract(data,hdr)
s = stats_region('amp_central_quadrant')
mn,sd = array_stats(bias_residual[s],method='mean',rms=True,
clip_sig=5.0,clip_iters=2)
rv['residualMeanAdu'][i,j] = mn
rv['residualRmsAdu'][i,j] = sd
return rv
def quick_parallel(fun,input,nproc,**kwargs):
if nproc > 1:
fun_with_args = partial(fun,**kwargs)
p = multiprocessing.Pool(nproc)
rv = p.map(fun_with_args,input)
p.close()
p.join()
else:
# instead of creating a 1-process pool just run the sequence
rv = [ fun(x,**kwargs) for x in input ]
return np.concatenate(rv)
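# example call (mirroring the usage in run_qa below, file list hypothetical):
#   quick_parallel(bias_checks, bias_paths, 4, overscan=True)
# farms bias_checks out over 4 worker processes and returns one concatenated
# record array covering all of the input files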
def run_qa(log,logFits,datadir,nproc=1,dogainrn=True,dobitcheck=True,
nsplit=0,nrun=0):
imType = np.char.rstrip(log['imType'])
fileNames = np.char.rstrip(log['fileName'])
utDirs = np.char.rstrip(log['utDir'])
filePaths = np.char.add(np.char.add(utDirs,'/'),fileNames)
filePaths = np.char.add(np.char.add(datadir,filePaths),'.fits')
calseqs = find_cal_sequences(log)
for imtype in calseqs:
print 'found %d %s sequences' % (len(calseqs[imtype]),imtype)
#
# empirical gain/readnoise calculation
#
if dogainrn:
for bi,fi in calseqs['zero_and_flat']:
biases = filePaths[bi]
flats = filePaths[fi]
# skip the first image in each sequence
gainrdnoise = calc_gain_rdnoise(biases[1:],flats[1:])
logFits.write(gainrdnoise,extname='GAINRN')
#
# bit integrity check
#
if dobitcheck and len(calseqs['flat'])>0:
flats = filePaths[np.concatenate(calseqs['flat'])]
nbits = 8
bitbit = np.zeros(len(flats),dtype=[('fileName','S35'),
('bitFreq','f4',(16,nbits))])
for i,flat in enumerate(flats):
fn = os.path.basename(flat).replace('.fz','').replace('.fits','')
bitbit['fileName'][i] = fn
fits = _open_fits(flat)
for j,hdu in enumerate(fits[1:]):
imNum = 'IM%d' % ampOrder[j]
data = hdu.read().astype(np.int32)
npix = float(data.size)
for bit in range(nbits):
nbit = np.sum((data&(1<<bit))>0)
bitbit['bitFreq'][i,j,bit] = nbit/npix
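# for a healthy ADC each low-order bit should be set in roughly half the
# pixels of a flat, so bitFreq values far from ~0.5 flag stuck or dropped
# bits (bit_report below plots these against a 0.35-0.65 range)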
print 'flat ',i,' out of ',len(flats)
logFits.write(bitbit,extname='BITCHK')
#
# bias ramps
#
if len(calseqs['zero'])>0:
# this checks for bias features using the image region of biases,
# used to check that the overscan feature search works correctly
biases = filePaths[np.concatenate(calseqs['zero'])]
biasrmp = quick_parallel(bias_checks,biases,nproc,overscan=False)
logFits.write(biasrmp,extname='BIASCHK')
ii = np.where((imType=='zero')|(imType=='object'))[0]
if nsplit > 0:
print 'splits: ',ii[0],len(ii),
ii = np.array_split(ii,nsplit)[nrun]
print ii[0],len(ii)
print 'checking overscans for ',len(ii),' images'
images = filePaths[ii]
biasrmp = quick_parallel(bias_checks,images,nproc,overscan=True)
biasrmp = np.lib.recfunctions.append_fields(biasrmp,'imType',imType[ii],
dtypes=imType.dtype)
logFits.write(biasrmp,extname='OSCANCHK')
def run_nightly_checks(utdir,logdir,datadir,redo=False):
from bokpipe.bokobsdb import generate_log
print 'running nightly check on ',utdir
logf = os.path.join(logdir,'log_ut%s.fits' % os.path.basename(utdir))
if os.path.exists(logf):
# assume that if log file exists processing is already done
if not redo:
return
else:
# start the file over
log = fitsio.read(logf,1)
os.remove(logf)
logFits = fitsio.FITS(logf,'rw')
logFits.write(log)
else:
generate_log([utdir],logf)
logFits = fitsio.FITS(logf,'rw')
log = logFits[1].read()
run_qa(log,logFits,datadir)
logFits.close()
def _reportfig_init8():
fig = plt.figure(figsize=(7.5,9.5))
fig.subplots_adjust(0.1,0.025,0.95,0.975,0.35,0.3)
return fig
def gainrn_report(data,outf,utbreaks=None,save=True):
plot_fields = [ ('biasADU',(700,1200), 'biasRmsADU',(2,10)),
('flatADU',(1e4,6e4), 'flatRmsADU',(0,600)),
('gain',(0,3), 'rdnoise',(0,15)),
]
for i in range(len(data)):
outf.write('%4d %s %s %s %s\n' % (i,data['bias1'][i],data['bias2'][i],
data['flat1'][i],data['flat2'][i]))
outf.write('\n')
for fields in plot_fields:
fig = _reportfig_init8()
f1,fr1,f2,fr2 = fields
for i in range(4):
ax1 = fig.add_subplot(4,2,i+1)
ax2 = fig.add_subplot(4,2,i+1+4)
for utb in utbreaks:
for ax in [ax1,ax2]:
ax.axvline(utb,c='gray',ls=':',lw=0.5)
for j in range(4):
ax1.plot(data[f1][:,4*i+j],label='IM%d'%ampOrder[4*i+j])
ax2.plot(data[f2][:,4*i+j],label='IM%d'%ampOrder[4*i+j])
ax1.legend(ncol=4,frameon=False,fontsize=9,columnspacing=1.0)
ax2.legend(ncol=4,frameon=False,fontsize=9,columnspacing=1.0)
ax1.set_ylim(*fr1)
ax2.set_ylim(*fr2)
ax1.set_ylabel(f1+' CCD%d'%(i+1),size=11)
ax2.set_ylabel(f2+' CCD%d'%(i+1),size=11)
for f in [f1,f2]:
for j in range(4):
outf.write('%-12s CCD%d\n' % (f,j+1))
for i in range(len(data)):
outf.write('%4d ' % (i))
for k in range(4):
outf.write('%8.2f '%data[f][i,4*j+k])
outf.write('\n')
outf.write('\n')
if save:
plt.savefig('bass_summary_%s.png'%f1[:4])
def _reportfig_init16():
fig = plt.figure(figsize=(7.5,9.5))
fig.subplots_adjust(0.1,0.025,0.95,0.975,0.35,0.3)
return fig
def bit_report(data,outf,utbreaks=None,save=True):
fig = _reportfig_init16()
for i in range(16):
ax = fig.add_subplot(4,4,i+1)
if utbreaks is not None:
for utb in utbreaks:
ax.axvline(utb,c='gray',ls=':',lw=0.5)
haslab = False # suppresses a warning message
for j in range(6):
l = 'bit%d'%j if j//2==i else None
if l is not None: haslab = True
ax.plot(data['bitFreq'][:,i,j],label=l)
for n in range(data['bitFreq'].shape[0]):
outf.write('%s ' % data['fileName'][n])
outf.write(('%.3f '*6) % tuple(data['bitFreq'][n,i,:6]))
outf.write('\n')
if haslab:
ax.legend(ncol=2,frameon=False,fontsize=8,columnspacing=1.0)
ax.set_ylim(0.35,0.65)
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.01))
if save:
plt.savefig('bass_summary_%s.png'%'bits')
def calc_overheads(logdata):
logdata = Table(logdata)
mjdk = 'mjd' if 'mjd' in logdata.colnames else 'mjdStart'
dt = np.diff(logdata[mjdk])*24*3600 - logdata['expTime'][:-1]
imt = {'zero':0,'dark':1,'flat':2,'object':3}
imts = [imt[img['imType'].strip()] for img in logdata[:-1]]
return imts,logdata[mjdk][:-1],dt
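# dt is the per-frame overhead in seconds: the gap between consecutive frame
# start times (MJD difference converted to seconds) minus the exposure time of
# the earlier frame, tagged by image type (0=zero, 1=dark, 2=flat, 3=object)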
def overhead_report(oheads):
if type(oheads) is str:
oheads = np.loadtxt(oheads,unpack=True)
imt,mjd,dt = oheads
imt = imt.astype(np.int32)
plt.scatter(mjd,dt,c=np.choose(imt,['gray','black','cyan','blue']))
def combined_report(logdir,utdates):
if utdates is None:
utdates = '*'
logfiles = sorted(glob(os.path.join(logdir,'log_ut%s.fits'%utdates)))
print logfiles
data1,data2 = [],[]
utbreaks1,utbreaks2 = [0,],[0,]
oheads = []
for i,logf in enumerate(logfiles):
fits = fitsio.FITS(logf)
try:
data1.append(Table(fits[2].read()))
utbreaks1.append(len(data1[-1])+utbreaks1[-1])
except:
pass
try:
data2.append(Table(fits[3].read()))
utbreaks2.append(len(data2[-1])+utbreaks2[-1])
except:
pass
log = fits[1].read()
oheads.append(calc_overheads(log))
data1 = vstack(data1)
data2 = vstack(data2)
oheads = np.hstack(oheads)
np.savetxt('bass_overheads.txt',oheads.transpose())
with open('bass_summary.txt','w') as outf:
gainrn_report(data1,outf,utbreaks1[1:])
bit_report(data2,outf,utbreaks2[1:])
if __name__=='__main__':
import argparse
plt.ioff()
parser = argparse.ArgumentParser()
parser.add_argument("-n","--nightly",action='store_true',
help="run nightly processing")
parser.add_argument("-r","--report",action='store_true',
help="make nightly report")
parser.add_argument("-d","--datadir",type=str,
default="/data/primefocus/bass/",
help="top-level data directory")
parser.add_argument("-l","--logdir",type=str,
default="/home/mcgreer/basslogs/",
help="log file directory")
parser.add_argument("-o","--output",type=str,
default="bassqa.fits",
help="output file")
parser.add_argument("--logfile",type=str,
help="input log file")
parser.add_argument("--nogainrn",action='store_true',
help="skip gain/readnoise calc (SLOW)")
parser.add_argument("--nobitcheck",action='store_true',
help="skip bit integrity check")
parser.add_argument("--nproc",type=int,default=1,
help="set number of processes to run [default 1]")
parser.add_argument("-R","--redo",action='store_true',
help="ignore existing data and redo")
parser.add_argument("--numsplit",type=int,default=0,
help="number of chunks to split data into [none]")
parser.add_argument("--splitnum",type=int,default=0,
help="which chunk number to run")
parser.add_argument("-u","--utdate",type=str,
help="restrict UT date")
args = parser.parse_args()
#
if args.utdate is not None:
utdirs = sorted(glob(os.path.join(args.datadir,args.utdate)))
else:
utdirs = sorted(glob(os.path.join(args.datadir,'201?????')))
if args.nightly:
for utdir in utdirs:
if args.logfile:
log = fitsio.read(args.logfile,1)
if os.path.exists(args.output):
os.remove(args.output)
logFits = fitsio.FITS(args.output,'rw')
logFits.write(None)
run_qa(log,logFits,args.datadir,nproc=args.nproc,
dogainrn=(not args.nogainrn),
dobitcheck=(not args.nobitcheck),
nsplit=args.numsplit,nrun=args.splitnum)
logFits.close()
# if this isn't here multiprocess gets stuck in an infinite
# loop... why?
sys.exit(0)
else:
run_nightly_checks(utdir,args.logdir,args.datadir,
redo=args.redo)
if args.report:
combined_report(args.logdir,args.utdate)
| bsd-3-clause |
micahhausler/pandashells | pandashells/test/module_checker_lib_tests.py | 7 | 1443 | #! /usr/bin/env python
from unittest import TestCase
from pandashells.lib.module_checker_lib import check_for_modules
from pandashells.lib import module_checker_lib
from mock import patch
class ModuleCheckerTests(TestCase):
def setUp(self):
module_checker_lib.CMD_DICT['fakemodule1'] = 'pip install fakemodule1'
module_checker_lib.CMD_DICT['fakemodule2'] = 'pip install fakemodule2'
module_checker_lib.CMD_DICT['os'] = 'part of standard module'
def test_check_for_modules_unrecognized(self):
"""
check_for_modules() raises error when module is unrecognized
"""
with self.assertRaises(ValueError):
check_for_modules(['not_a_module'])
@patch('pandashells.lib.module_checker_lib.importlib.import_module')
def test_check_for_modules_no_modules(self, import_module_mock):
"""
check_for_modules() does nothing when module list is empty
"""
check_for_modules([])
self.assertFalse(import_module_mock.called)
def test_check_for_modules_existing_module(self):
"""
check_for_modules() successfully finds existing module
"""
check_for_modules(['os'])
def test_check_for_modules_bad(self):
"""
check_for_modules() correctly identifies missing modules
"""
with self.assertRaises(ImportError):
check_for_modules(['fakemodule1', 'fakemodule2'])
| bsd-2-clause |
volodymyrss/3ML | threeML/catalogs/Swift.py | 1 | 10812 | import numpy as np
import pandas as pd
import re
import urllib2
import astropy.table as astro_table
from threeML.catalogs.VirtualObservatoryCatalog import VirtualObservatoryCatalog
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.config.config import threeML_config
from threeML.io.get_heasarc_table_as_pandas import get_heasarc_table_as_pandas
from threeML.io.rich_display import display
import astropy.time as astro_time
_gcn_match = re.compile("^\d{4}GCN\D?\.*(\d*)\.*\d\D$")
_trigger_name_match = re.compile("^GRB \d{6}[A-Z]$")
class SwiftGRBCatalog(VirtualObservatoryCatalog):
def __init__(self, update=False):
"""
The Swift GRB catalog. Search for GRBs by trigger
number, location, T90, and date range.
:param update: force update the XML VO table
"""
self._update = update
super(SwiftGRBCatalog, self).__init__('swiftgrb',
threeML_config['catalogs']['Swift']['Swift GRB catalog'],
'Swift GRB catalog')
# collect all the instruments also seeing the GRBs
self._build_other_obs_instruments()
def apply_format(self, table):
new_table = table['name',
'ra', 'dec',
'trigger_time',
'redshift',
'bat_t90',
'bat_detection',
'xrt_detection',
'xrt_flare',
'uvot_detection',
'radio_detection',
'opt_detection'
]
new_table['ra'].format = '5.3f'
new_table['dec'].format = '5.3f'
return new_table.group_by('trigger_time')
def _get_vo_table_from_source(self):
self._vo_dataframe = get_heasarc_table_as_pandas('swiftgrb',
update=self._update,
cache_time_days=1.)
def _source_is_valid(self, source):
warn_string = "The trigger %s is not valid. Must be in the form GRB080916009" % source
match = _trigger_name_match.match(source)
if match is None:
custom_warnings.warn(warn_string)
answer = False
else:
answer = True
return answer
def _build_other_obs_instruments(self):
"""
builds a list of all the other instruments that observed Swift GRBs
:return:
"""
obs_inst_ = map(np.unique, [np.asarray(self._vo_dataframe.other_obs),
np.asarray(self._vo_dataframe.other_obs2),
np.asarray(self._vo_dataframe.other_obs3),
np.asarray(self._vo_dataframe.other_obs4)])
self._other_observings_instruments = filter(lambda x: x != '', np.unique(np.concatenate(obs_inst_)))
@property
def other_observing_instruments(self):
return self._other_observings_instruments
def query_other_observing_instruments(self, *instruments):
"""
search for observations that were also seen by the requested instrument.
to see what instruments are available, use the .other_observing_instruments call
:param instruments: other instruments
:return:
"""
all_queries = []
for instrument in instruments:
assert instrument in self._other_observings_instruments, "Other instrument choices include %s" % (
' ,'.join(self._other_observings_instruments))
query_string = ' other_obs == "%s" | other_obs2 == "%s" |other_obs3 == "%s" |other_obs4 == "%s"' %tuple([instrument]*4)
result = self._vo_dataframe.query(query_string)
all_queries.append(result)
query_results = pd.concat(all_queries)
table = astro_table.Table.from_pandas(query_results)
name_column = astro_table.Column(name='name', data=query_results.index)
table.add_column(name_column, index=0)
out = self.apply_format(table)
self._last_query_results = query_results
return out
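# minimal usage sketch (assumes network access to the HEASARC table service):
#   cat = SwiftGRBCatalog()
#   cat.query_other_observing_instruments('Fermi-GBM')
#   cat.get_redshift()   # redshifts for the bursts returned by the query above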
@staticmethod
def _get_fermiGBM_trigger_number_from_gcn(gcn_url):
"""
this is a custom function that parses GBM GCNs to find the burst number
that can later be used to download GBM data. It contains a lot of regex statements
to handle the variability in the GCNs
:param gcn_url: url to gbm gcn
:return:
"""
data = urllib2.urlopen(gcn_url)
string = ''.join(data.readlines()).replace('\n', '')
try:
trigger_number = re.search("trigger *\d* */ *(\d{9}|\d{6}\.\d{3})", string).group(1).replace('.', '')
except(AttributeError):
try:
trigger_number = re.search("GBM *(\d{9}|\d{6}\.\d{3}), *trigger *\d*", string).group(1).replace('.', '')
except(AttributeError):
try:
trigger_number = re.search("trigger *\d* *, *trigcat *(\d{9}|\d{6}\.\d{3})", string).group(
1).replace('.', '')
except(AttributeError):
try:
trigger_number = re.search("trigger *.* */ *\D{0,3}(\d{9}|\d{6}\.\d{3})", string).group(
1).replace('.', '')
except(AttributeError):
try:
trigger_number = re.search("Trigger number*.* */ *GRB *(\d{9}|\d{6}\.\d{3})", string).group(
1).replace('.', '')
except(AttributeError):
trigger_number = None
return trigger_number
def get_other_observation_information(self):
"""
returns a structured pandas table containing the other observing instruments, their GCNs and if obtainable,
their trigger numbers/ data identifiers. Currently, the trigger number is only obtained for Fermi-LAT-GBM.
:return:
"""
assert self._last_query_results is not None, "You have to run a query before getting observing information"
# Loop over the table and build a source for each entry
sources = {}
for name, row in self._last_query_results.T.iteritems():
# First we want to get the the detectors used in the SCAT file
obs_instrument = {}
for obs in ['xrt', 'uvot', 'bat', 'opt', 'radio']:
obs_detection = "%s_detection" % obs
if obs in ['xrt', 'uvot', 'bat']:
obs_ref = "%s_pos_ref" % obs
else:
obs_ref = "%s_ref" % obs
detect = row[obs_detection]
if detect == 'Y': # or detect== 'U':
observed = True
else:
observed = False
if observed:
reference = self._parse_redshift_reference(row[obs_ref])
#gcn = "https://gcn.gsfc.nasa.gov/gcn3/%s.gcn3" % gcn_number
info = {'reference': reference, 'observed': detect}
else:
info = {'GCN': None, 'observed': detect}
obs_instrument[obs] = info
sources[name] = obs_instrument
sources = pd.concat(map(pd.DataFrame, sources.values()), keys=sources.keys())
return sources
def get_other_instrument_information(self):
"""
Return the detectors used for spectral analysis as well as their background
intervals. Peak flux and fluence intervals are also returned as well as best fit models
:return: observing information dataframe indexed by source
"""
assert self._last_query_results is not None, "You have to run a query before getting observing information"
sources = {}
for name, row in self._last_query_results.T.iteritems():
obs_instrument = {}
# loop over the observation indices
for obs in range(1, 5):
if obs == 1:
obs_base = "other_obs"
else:
obs_base = "other_obs%d" % obs
obs_ref = "%s_ref" % obs_base
obs = row[obs_base]
# this means that nothing in this column saw the grb
if obs == '':
observed = False
else:
observed = True
if observed:
# if we saw it then lets get the GCN
gcn_number = _gcn_match.search(row[obs_ref]).group(1)
# gcn_number = filter(lambda x: x != '', row[obs_ref].split('.'))[1]
# make the URL
gcn = "https://gcn.gsfc.nasa.gov/gcn3/%s.gcn3" % gcn_number
# just for Fermi GBM, lets get the trigger number
# TODO: add more instruments
if obs == 'Fermi-GBM':
info = {'GCN': gcn, 'trigger number': self._get_fermiGBM_trigger_number_from_gcn(str(gcn))}
else:
info = {'GCN': gcn, 'trigger number': None}
obs_instrument[obs] = info
sources[name] = obs_instrument
# build the data frame
sources = pd.concat(map(pd.DataFrame, sources.values()), keys=sources.keys())
display(sources)
return sources
def get_redshift(self):
"""
Get the redshift and redshift type from the searched sources
:return:
"""
assert self._last_query_results is not None, "You have to run a query before getting observing information"
redshift_df = (self._last_query_results.loc[:,['redshift','redshift_err','redshift_type','redshift_ref']]).copy(deep=True)
redshift_df = redshift_df.rename(columns={"redshift": "z", "redshift_err": "z err",'redshift_type': 'z type','redshift_ref':'reference'})
redshift_df['reference'] = redshift_df['reference'].apply(self._parse_redshift_reference)
return redshift_df
@staticmethod
def _parse_redshift_reference(reference):
if reference == '':
url = None
elif 'GCN' in reference:
gcn_number = _gcn_match.search(reference).group(1)
url = "https://gcn.gsfc.nasa.gov/gcn3/%s.gcn3" % gcn_number
else:
url = "http://adsabs.harvard.edu/abs/%s" % reference
return url
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
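# illustrative call on toy data (value approximate):
#   X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
#   labels = np.array([0, 0, 1, 1])
#   silhouette_score(X, labels)   # ~0.93 for two tight, well-separated pairs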
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
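# silhouette_score above is simply the mean of these per-sample values
# (optionally computed on a random subsample); values near 0 flag samples
# that sit close to the boundary between two clusters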
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
PatrickChrist/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
jardians/sp17-i524 | project/S17-IR-P012/code/binarize.py | 21 | 1096 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 4 15:56:31 2017
I524 Project: OCR
Preprocessing
Binarization
@author: saber
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
image_path = 'sample1.png'
image_arr = cv2.imread(image_path, 0)
plt.figure(1)
plt.subplot(311)
# Plot histogram of data
plt.hist(image_arr.flatten())
hist, bin_centers = np.histogram(image_arr)
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
# cumulative class means below/above each candidate threshold (Otsu's method)
mean1 = np.cumsum(hist * bin_centers[1:]) / weight1
mean2 = (np.cumsum((hist * bin_centers[1:])[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:])**2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
img_bin = np.zeros(image_arr.shape)
for i in range(image_arr.shape[0]):
for j in range(image_arr.shape[1]):
if image_arr[i, j] > threshold:
img_bin[i, j] = 255
else:
img_bin[i, j] = 0
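# an equivalent, much faster vectorized form of the loop above would be:
#   img_bin = np.where(image_arr > threshold, 255, 0)
# which produces the same binary image without the explicit double loop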
#plt.imshow(image_arr)
#plt.imshow(img_bin)
plt.subplot(312)
plt.imshow(image_arr, 'gray')
plt.subplot(313)
plt.imshow(img_bin, 'gray')
| apache-2.0 |
deprecated/nebulio | nebulio/tests/test_composite.py | 1 | 3402 | """Tests for composite filter """
from __future__ import (print_function, absolute_import, division, unicode_literals)
import os
import pytest
import numpy as np
import nebulio
import pysynphot
from nebulio.tests.utils import this_func_name
from matplotlib import pyplot as plt
def plot_filterset(fs):
title = this_func_name(2)
fig, ax = plt.subplots()
# Plot each bandpass
for bp in fs.bandpasses:
ax.plot(bp.wave, bp.T, '-', label=bp.fname)
for emline in fs.emlines:
if emline.fwhm_kms is not None:
for wav0, strength, fwhm in zip(emline.wave, emline.intensity,
emline.fwhm_angstrom):
gauss = pysynphot.GaussianSource(1.0, wav0, fwhm)
ax.plot(gauss.wave,
strength*gauss.flux*fs.bandpasses[0].T.max()/(emline.intensity[0]*gauss.flux.max()),
label='{:.2f} A'.format(wav0))
ax.set_title(title)
ax.set_xlim(min([bp.wav0 - bp.Wj for bp in fs.bandpasses[:2]]),
max([bp.wav0 + bp.Wj for bp in fs.bandpasses[:2]]))
ax.legend()
plotfile = os.path.join("plots", '{}.pdf'.format(this_func_name(2)))
fig.savefig(plotfile)
def plot_composite_bandpass(cbp, emline=None):
title = this_func_name(2)
fig, ax = plt.subplots()
# Plot the composite bandpass
ax.plot(cbp.wave, cbp.T, '-', label='composite')
# Plot the individual constituent bandpasses
for bp in cbp.bandpasses:
ax.plot(bp.wave, bp.T, '-', label=bp.fname)
# Plot Gaussian profiles of all components of emission line multiplet
if emline is not None:
if emline.fwhm_kms is not None:
title += ' {}, V = {:.1f} km/s, W = {:.1f} km/s'.format(
emline.lineid, emline.velocity, emline.fwhm_kms)
for wav0, strength, fwhm in zip(emline.wave, emline.intensity,
emline.fwhm_angstrom):
gauss = pysynphot.GaussianSource(1.0, wav0, fwhm)
ax.plot(gauss.wave, gauss.flux*cbp.T.max()/gauss.flux.max(),
label='{:.2f} A'.format(wav0))
ax.set_xlim(cbp.wav0 - cbp.Wj, cbp.wav0 + cbp.Wj)
ax.set_title(title)
ax.legend()
plotfile = os.path.join("plots", '{}.pdf'.format(this_func_name(2)))
fig.savefig(plotfile)
def test_twin_sii_filter():
fnames = ['wfc3,uvis1,FQ672N', 'wfc3,uvis1,FQ674N']
cbp = nebulio.CompositeBandpass(fnames)
sii_doublet = nebulio.EmissionLine("[S II] 6724", velocity=25.0, fwhm_kms=40.0)
plot_composite_bandpass(cbp, sii_doublet)
assert cbp.Wj*cbp.Tm == np.sum([bp.Wj*bp.Tm for bp in cbp.bandpasses])
def test_filterset_with_composite_sii():
fs = nebulio.Filterset(
bpnames=[['wfc3,uvis1,FQ672N', 'wfc3,uvis1,FQ674N'],
"wfc3,uvis1,F673N", "wfc3,uvis1,F547M"],
lineids=['[S II] 6724', '[S II] 6724'],
velocity=25.0, fwhm_kms=20.0
)
print(fs.__dict__)
plot_filterset(fs)
assert True # what to test?
def test_filterset_with_nii_ha():
fs = nebulio.Filterset(
bpnames=["wfc3,uvis1,F658N", "wfc3,uvis1,F656N", "wfc3,uvis1,F547M"],
lineids=['[N II] 6583', 'H I 6563'],
velocity=25.0, fwhm_kms=20.0
)
print(fs.__dict__)
plot_filterset(fs)
assert True # what to test?
| mit |
TK-TarunW/ecosystem | spark-2.0.2-bin-hadoop2.7/python/pyspark/sql/context.py | 3 | 22432 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession(sparkContext)
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given it default to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.0.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
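    # Illustrative sketch of the call above; the table name, path and source
    # format are hypothetical and depend on your data layout:
    #   people = sqlContext.createExternalTable("people", path="/tmp/people",
    #                                           source="parquet")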
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
def __init__(self, sparkContext, jhiveContext=None):
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encounter incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
cfei18/incubator-airflow | airflow/contrib/hooks/bigquery_hook.py | 2 | 63861 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
from builtins import range
from past.builtins import basestring
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
from apiclient.discovery import HttpError, build
from googleapiclient import errors
from pandas_gbq.gbq import \
_check_google_client_version as gbq_check_google_client_version
from pandas_gbq import read_gbq
from pandas_gbq.gbq import \
_test_google_api_imports as gbq_test_google_api_imports
from pandas_gbq.gbq import GbqConnector
class BigQueryHook(GoogleCloudBaseHook, DbApiHook, LoggingMixin):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None,
use_legacy_sql=True):
super(BigQueryHook, self).__init__(
gcp_conn_id=bigquery_conn_id, delegate_to=delegate_to)
self.use_legacy_sql = use_legacy_sql
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None, dialect=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: string
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
defaults to use `self.use_legacy_sql` if not specified
:type dialect: string in {'legacy', 'standard'}
"""
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
return read_gbq(sql,
project_id=self._get_field('project'),
dialect=dialect,
verbose=False)
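    # Usage sketch (illustrative only): assumes an Airflow connection named
    # 'bigquery_default' that points at a project with BigQuery enabled.
    #   hook = BigQueryHook(bigquery_conn_id='bigquery_default',
    #                       use_legacy_sql=False)
    #   df = hook.get_pandas_df('SELECT 17 AS answer', dialect='standard')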
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: string
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: string
:param table_id: The name of the table to check the existence of.
:type table_id: string
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute()
return True
except errors.HttpError as e:
if e.resp['status'] == '404':
return False
raise
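    # Usage sketch (illustrative only; project, dataset and table names are
    # hypothetical):
    #   if hook.table_exists('my-project', 'my_dataset', 'events'):
    #       print('table already exists')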
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self,
project_id,
service,
reauth=False,
verbose=False,
dialect='legacy'):
super(BigQueryPandasConnector, self).__init__(project_id)
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id, use_legacy_sql=True):
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
self.running_job_id = None
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning={}
):
"""
Creates a new, empty table in the dataset.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema_fields: list
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:return:
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
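    # Illustrative call (all identifiers below are hypothetical); `cursor` is a
    # BigQueryBaseCursor, e.g. hook.get_conn().cursor():
    #   cursor.create_empty_table(
    #       project_id='my-project', dataset_id='my_dataset', table_id='events',
    #       schema_fields=[{'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'REQUIRED'}],
    #       time_partitioning={'type': 'DAY', 'field': 'ts'})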
def create_external_table(self,
external_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
autodetect=False,
compression='NONE',
ignore_unknown_values=False,
max_bad_records=0,
skip_leading_rows=0,
field_delimiter=',',
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs={}
):
"""
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table name to create external table.
If <project> is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: string
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: string
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
"""
project_id, dataset_id, external_table_id = \
_split_tablename(table_input=external_project_dataset_table,
default_project_id=self.project_id,
var_name='external_project_dataset_table')
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
compression = compression.upper()
allowed_compressions = ['NONE', 'GZIP']
if compression not in allowed_compressions:
raise ValueError("{0} is not a valid compression format. "
"Please use one of the following types: {1}"
.format(compression, allowed_compressions))
table_resource = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values
},
'tableReference': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': external_table_id,
}
}
if schema_fields:
table_resource['externalDataConfiguration'].update({
'schema': {
'fields': schema_fields
}
})
self.log.info('Creating external table: %s', external_project_dataset_table)
if max_bad_records:
table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
        if 'quote' not in src_fmt_configs:
src_fmt_configs['quote'] = quote_character
if 'allowQuotedNewlines' not in src_fmt_configs:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
if 'allowJaggedRows' not in src_fmt_configs:
src_fmt_configs['allowJaggedRows'] = allow_jagged_rows
src_fmt_to_param_mapping = {
'CSV': 'csvOptions',
'GOOGLE_SHEETS': 'googleSheetsOptions'
}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows', 'allowQuotedNewlines',
'fieldDelimiter', 'skipLeadingRows',
'quote'
],
'googleSheetsOptions': ['skipLeadingRows']
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[
src_fmt_to_param_mapping[source_format]
]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
source_format]] = src_fmt_configs
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource
).execute()
self.log.info('External table created successfully: %s',
external_project_dataset_table)
except HttpError as err:
raise Exception(
'BigQuery job failed. Error was: {}'.format(err.content)
)
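    # Illustrative call for a GCS-backed external table (bucket, table and
    # schema names are hypothetical):
    #   cursor.create_external_table(
    #       external_project_dataset_table='my_dataset.ext_events',
    #       schema_fields=[{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}],
    #       source_uris=['gs://my-bucket/events/*.csv'],
    #       source_format='CSV', skip_leading_rows=1)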
def run_query(self,
bql=None,
sql=None,
destination_dataset_table=False,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=False,
udf_config=False,
use_legacy_sql=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None,
schema_update_options=(),
priority='INTERACTIVE',
time_partitioning={}):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: (Deprecated. Use `sql` parameter instead) The BigQuery SQL
to execute.
:type bql: string
:param sql: The BigQuery SQL to execute.
:type sql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:type destination_dataset_table: string
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: string
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: boolean
        :param udf_config: The User Defined Function configuration for the query.
            See https://cloud.google.com/bigquery/user-defined-functions for details.
        :type udf_config: list
        :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
            If `None`, defaults to `self.use_legacy_sql`.
        :type use_legacy_sql: boolean
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: integer
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: string
        :param query_params: a dictionary containing query parameter types and
values, passed to BigQuery
:type query_params: dict
        :param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: string
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and
expiration as per API specifications. Note that 'field' is not available in
conjunction with dataset.table$partition.
:type time_partitioning: dict
"""
# TODO remove `bql` in Airflow 2.0 - Jira: [AIRFLOW-2513]
sql = bql if sql is None else sql
if bql:
import warnings
warnings.warn('Deprecated parameter `bql` used in '
'`BigQueryBaseCursor.run_query` '
'Use `sql` parameter instead to pass the sql to be '
'executed. `bql` parameter is deprecated and '
'will be removed in a future version of '
'Airflow.',
category=DeprecationWarning)
if sql is None:
raise TypeError('`BigQueryBaseCursor.run_query` missing 1 required '
'positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
if use_legacy_sql is None:
use_legacy_sql = self.use_legacy_sql
configuration = {
'query': {
'query': sql,
'useLegacySql': use_legacy_sql,
'maximumBillingTier': maximum_billing_tier,
'maximumBytesBilled': maximum_bytes_billed,
'priority': priority
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
if query_params:
            if use_legacy_sql:
                raise ValueError("Query parameters are not allowed when using "
"legacy SQL")
else:
configuration['query']['queryParameters'] = query_params
time_partitioning = _cleanse_time_partitioning(
destination_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['query'].update({
'timePartitioning': time_partitioning
})
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['query'][
'schemaUpdateOptions'] = schema_update_options
return self.run_with_configuration(configuration)
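    # Illustrative call (identifiers are hypothetical); the return value is the
    # BigQuery job id of the submitted query:
    #   job_id = cursor.run_query(
    #       sql='SELECT COUNT(*) AS n FROM my_dataset.events',
    #       destination_dataset_table='my_dataset.event_counts',
    #       write_disposition='WRITE_TRUNCATE', use_legacy_sql=False)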
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
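    # Illustrative export to Google Cloud Storage (bucket and table names are
    # hypothetical):
    #   cursor.run_extract(
    #       source_project_dataset_table='my_dataset.events',
    #       destination_cloud_storage_uris=['gs://my-bucket/exports/events-*.csv'],
    #       export_format='CSV', print_header=True)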
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
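    # Illustrative table copy (identifiers are hypothetical):
    #   cursor.run_copy(
    #       source_project_dataset_tables=['my_dataset.events_2018'],
    #       destination_project_dataset_table='my_dataset.events_all',
    #       write_disposition='WRITE_APPEND')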
def run_load(self,
destination_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs={},
time_partitioning={}):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table to load data into. If <project> is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: string
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: boolean
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
        :param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and
expiration as per API specifications. Note that 'field' is not available in
conjunction with dataset.table$partition.
:type time_partitioning: dict
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options. "
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': [],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
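    # Illustrative load from GCS into a DAY-partitioned table (identifiers are
    # hypothetical):
    #   cursor.run_load(
    #       destination_project_dataset_table='my_dataset.events$20180101',
    #       schema_fields=[{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}],
    #       source_uris=['gs://my-bucket/events/20180101/*.json'],
    #       source_format='NEWLINE_DELIMITED_JSON',
    #       write_disposition='WRITE_TRUNCATE')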
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {'configuration': configuration}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
self.running_job_id = query_reply['jobReference']['jobId']
# Wait for query to finish.
keep_polling_job = True
while (keep_polling_job):
try:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id).execute()
if (job['status']['state'] == 'DONE'):
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.
format(job['status']['errorResult'], job))
else:
self.log.info('Waiting for job to complete : %s, %s',
self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error, waiting for job to complete: %s',
err.resp.status, self.running_job_id)
time.sleep(5)
else:
raise Exception(
'BigQuery job status check failed. Final error was: %s',
err.resp.status)
return self.running_job_id
def poll_job_complete(self, job_id):
jobs = self.service.jobs()
try:
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
if (job['status']['state'] == 'DONE'):
return True
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error while polling job with id %s',
err.resp.status, job_id)
else:
raise Exception(
'BigQuery job status check failed. Final error was: %s',
err.resp.status)
return False
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute()
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while (polling_attempts < max_polling_attempts and not job_complete):
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if (job_complete):
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif (polling_attempts == max_polling_attempts):
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5)
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute())
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
        Delete an existing table from the dataset. If the table does not
        exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
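    # Illustrative call (table name is hypothetical):
    #   cursor.run_table_delete('my_dataset.stale_table', ignore_if_missing=True)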
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
        Creates a new, empty table in the dataset. If the table already
        exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info('Table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
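    # Illustrative upsert of a minimal table resource (identifiers are
    # hypothetical):
    #   cursor.run_table_upsert(
    #       dataset_id='my_dataset',
    #       table_resource={'tableReference': {'tableId': 'events'},
    #                       'description': 'daily events table'})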
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute()
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute()
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
return source_dataset_resource
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id, use_legacy_sql=True):
super(BigQueryCursor, self).__init__(
service=service,
project_id=project_id,
use_legacy_sql=use_legacy_sql)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql = _bind_parameters(operation,
parameters) if parameters else operation
self.job_id = self.run_query(sql)
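    # PEP 249-style usage sketch; parameters are bound with the pyformat
    # placeholders handled by _bind_parameters, and `hook` is a hypothetical
    # BigQueryHook instance:
    #   cursor = hook.get_conn().cursor()
    #   cursor.execute('SELECT %(x)s AS x', {'x': 1})
    #   row = cursor.fetchone()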
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (self.service.jobs().getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token).execute())
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
                self.page_token = None
                self.job_id = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples). An empty sequence is returned when no more rows are
available. The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor's arraysize determines the number of rows to be
fetched. The method should try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows not being
available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`
(or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
        return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
    for (name, value) in parameters.items():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
if table_input.count('.') + table_input.count(':') > 3:
raise Exception(('{var}Use either : or . to specify project '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(('{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, ("{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
('{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}').format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
log = LoggingMixin().log
log.info('Project not included in {var}: {input}; '
'using project "{project}"'.format(
var=var_name,
input=table_input,
project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
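# Illustrative examples (hypothetical names):
#   _split_tablename('my-project:my_dataset.my_table', 'default-project')
#       -> ('my-project', 'my_dataset', 'my_table')
#   _split_tablename('my_dataset.my_table', 'default-project')
#       -> ('default-project', 'my_dataset', 'my_table')  # falls back to default
#   _split_tablename('other-project.my_dataset.my_table', 'default-project')
#       -> ('other-project', 'my_dataset', 'my_table')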
def _cleanse_time_partitioning(destination_dataset_table, time_partitioning_in):
# if it is a partitioned table ($ is in the table name) add partition load option
time_partitioning_out = {}
if destination_dataset_table and '$' in destination_dataset_table:
assert not time_partitioning_in.get('field'), (
"Cannot specify field partition and partition name "
"(dataset.table$partition) at the same time"
)
time_partitioning_out['type'] = 'DAY'
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
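# Illustrative example (hypothetical table name): a destination of
# 'my_dataset.my_table$20180101' with time_partitioning_in={} yields
# {'type': 'DAY'}; a non-partitioned destination simply returns a copy of the
# options that were passed in.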
| apache-2.0 |
maythapk/sklearn_pycon2014 | notebooks/fig_code/svm_gui.py | 47 | 11549 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking, and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
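    # Note: the surface is evaluated on a 1-unit grid spanning the fixed axis
    # limits (x_min..x_max, y_min..y_max); Z holds the decision-function value
    # at each grid node and is reshaped so the View can contour it.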
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
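# Interaction flow: mouse clicks call Controller.add_example(), which appends to
# Model.data and notifies observers; pressing "Fit" calls Controller.fit(), which
# trains an SVC (or a OneClassSVM when all points share one label), stores the new
# decision surface via Model.set_surface(), and fires Model.changed("surface") so
# the View redraws.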
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', self.onkeypress)
canvas.mpl_connect('key_release_event', self.onkeyrelease)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.shift_down = False
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onkeypress(self, event):
if event.key == "shift":
self.shift_down = True
def onkeyrelease(self, event):
if event.key == "shift":
self.shift_down = False
def onclick(self, event):
if event.xdata and event.ydata:
if self.shift_down or event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
elif event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
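# Example invocation (assuming the script is saved as svm_gui.py):
#   python svm_gui.py --output points.svmlight
# opens the GUI and, once the window is closed, dumps the clicked points in
# svmlight format to points.svmlight.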
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
manterd/myPhyloDB | functions/analysis/anova_graphs.py | 1 | 80391 | import datetime
from django.http import HttpResponse
import logging
import json
import numpy as np
import pandas as pd
from pyper import *
from scipy import stats
import zipfile
from database.models import Sample
import functions
LOG_FILENAME = 'error_log.txt'
pd.set_option('display.max_colwidth', -1)
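# Overview: this module backs the ANOVA/ANCOVA analysis page. getCatUnivData()
# fits R aov() models for categorical (plus optional quantitative) meta-variables
# and returns column/errorbar chart series; getQuantUnivData() fits lm()
# regressions against a quantitative variable and returns scatter series with
# fitted lines. Both report progress through functions.setBase() and honor the
# stop flag held in stops[PID].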
def getCatUnivData(request, RID, stops, PID):
try:
        while True:  # note: every AJAX code path below returns, so this never iterates twice; a non-AJAX request would spin here indefinitely
if request.is_ajax():
# Get variables from web page
allJson = request.body.split('&')[0]
all = json.loads(allJson)
functions.setBase(RID, 'Step 1 of 4: Selecting your chosen meta-variables...')
selectAll = int(all["selectAll"])
keggAll = int(all["keggAll"])
nzAll = int(all["nzAll"])
sig_only = int(all["sig_only"])
metaValsCat = all['metaValsCat']
metaIDsCat = all['metaIDsCat']
metaValsQuant = all['metaValsQuant']
metaIDsQuant = all['metaIDsQuant']
treeType = int(all['treeType'])
DepVar = int(all["DepVar"])
# Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections
savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar, levelDep=True)
allFields = catFields + quantFields
if not catFields:
error = "Selected categorical variable(s) contain only one level.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
if not finalSampleIDs:
error = "No valid samples were contained in your final dataset.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
result = ''
result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n'
result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n'
result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n'
result += '===============================================\n\n'
functions.setBase(RID, 'Step 1 of 4: Selecting your chosen meta-variables...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# END VALIDATE
# START QUERY
functions.setBase(RID, 'Step 2 of 4: Selecting your chosen taxa or KEGG level...')
# filter otus based on user settings
remUnclass = all['remUnclass']
remZeroes = all['remZeroes']
perZeroes = int(all['perZeroes'])
filterData = all['filterData']
filterPer = int(all['filterPer'])
filterMeth = int(all['filterMeth'])
mapTaxa = all['map_taxa']
finalDF = pd.DataFrame()
allDF = pd.DataFrame()
if treeType == 1:
if selectAll == 0 or selectAll == 8:
taxaString = all["taxa"]
taxaDict = json.JSONDecoder(object_pairs_hook=functions.multidict).decode(taxaString)
filteredDF = savedDF.copy()
else:
taxaDict = ''
filteredDF = functions.filterDF(savedDF, DepVar, selectAll, remUnclass, remZeroes, perZeroes, filterData, filterPer, filterMeth)
finalDF, missingList = functions.getTaxaDF(selectAll, taxaDict, filteredDF, metaDF, allFields, DepVar, RID, stops, PID)
if selectAll == 8:
result += '\nThe following PGPRs were not detected: ' + ", ".join(missingList) + '\n'
result += '===============================================\n'
if treeType == 2:
keggDict = ''
if keggAll == 0:
keggString = all["kegg"]
keggDict = json.JSONDecoder(object_pairs_hook=functions.multidict).decode(keggString)
finalDF, allDF = functions.getKeggDF(keggAll, keggDict, savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if treeType == 3:
keggDict = ''
if nzAll == 0:
keggString = all["nz"]
keggDict = json.JSONDecoder(object_pairs_hook=functions.multidict).decode(keggString)
finalDF, allDF = functions.getNZDF(nzAll, keggDict, savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if finalDF.empty:
error = "Selected taxa were not found in your selected samples."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
# make sure column types are correct
finalDF[catFields] = finalDF[catFields].astype(str)
finalDF[quantFields] = finalDF[quantFields].astype(float)
# transform Y, if requested
transform = int(all["transform"])
finalDF = functions.transformDF(transform, DepVar, finalDF)
# save location info to session
myDir = 'myPhyloDB/media/temp/anova/'
if not os.path.exists(myDir):
os.makedirs(myDir)
path = str(myDir) + str(RID) + '.biom'
functions.imploding_panda(path, treeType, DepVar, finalSampleIDs, metaDF, finalDF)
functions.setBase(RID, 'Step 2 of 4: Selecting your chosen taxa or KEGG level...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# END QUERY
# START STATS
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...')
finalDict = {}
seriesList = []
xAxisDict = {}
yAxisDict = {}
if os.name == 'nt':
r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True)
else:
r = R(RCMD="R/R-Linux/bin/R", use_pandas=True)
functions.setBase(RID, 'Verifying R packages...missing packages are being installed')
# R packages from cran
r("list.of.packages <- c('lsmeans', 'ggplot2', 'RColorBrewer', 'ggthemes')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
print r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)")
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...')
print r("library(lsmeans)")
print r("library(ggplot2)")
print r("library(ggthemes)")
print r("library(RColorBrewer)")
print r('source("R/myFunctions/myFunctions.R")')
# R graph
r.assign('finalDF', finalDF)
colorVal = all['colorVal']
if colorVal != 'None':
r.assign('colorVal', colorVal)
else:
r.assign('colorVal', 'rank_name')
xVal = all['xVal']
if xVal != 'None':
r.assign('xVal', xVal)
else:
r.assign('xVal', catFields[0])
gridVal_X = all['gridVal_X']
r.assign('gridVal_X', gridVal_X)
gridVal_Y = all['gridVal_Y']
r.assign('gridVal_Y', gridVal_Y)
if DepVar == 0:
r('DepVar <- "abund"')
elif DepVar == 1:
r('DepVar <- "rel_abund"')
elif DepVar == 2:
r('DepVar <- "rich"')
elif DepVar == 3:
r('DepVar <- "diversity"')
elif DepVar == 4:
r('DepVar <- "abund_16S"')
if gridVal_X == 'None' and gridVal_Y == 'None':
r("gDF <- data.frame(x=finalDF[,paste(xVal)], y=finalDF[,paste(DepVar)], \
myFill=finalDF[,paste(colorVal)])")
r("gDF <- aggregate(gDF[, 'y'], list(gDF$x, gDF$myFill), mean)")
r("names(gDF) <- c('x', 'myFill', 'y')")
elif gridVal_X != 'None' and gridVal_Y == 'None':
r("gDF <- data.frame(x=finalDF[,paste(xVal)], y=finalDF[,paste(DepVar)], \
gridVal_X=finalDF[,paste(gridVal_X)], \
myFill=finalDF[,paste(colorVal)])")
r("gDF <- aggregate(gDF[, 'y'], list(gDF$x, gDF$myFill, gDF$gridVal_X), mean)")
r("names(gDF) <- c('x', 'myFill', 'gridVal_X', 'y')")
elif gridVal_X == 'None' and gridVal_Y != 'None':
r("gDF <- data.frame(x=finalDF[,paste(xVal)], y=finalDF[,paste(DepVar)], \
gridVal_Y=finalDF[,paste(gridVal_Y)], \
myFill=finalDF[,paste(colorVal)])")
r("gDF <- aggregate(gDF[, 'y'], list(gDF$x, gDF$myFill, gDF$gridVal_Y), mean)")
r("names(gDF) <- c('x', 'myFill', 'gridVal_Y', 'y')")
elif gridVal_X != 'None' and gridVal_Y != 'None':
r("gDF <- data.frame(x=finalDF[,paste(xVal)], y=finalDF[,paste(DepVar)], \
gridVal_X=finalDF[,paste(gridVal_X)], gridVal_Y=finalDF[,paste(gridVal_Y)], \
myFill=finalDF[,paste(colorVal)])")
r("gDF <- aggregate(gDF[, 'y'], list(gDF$x, gDF$myFill, gDF$gridVal_X, gDF$gridVal_Y), mean)")
r("names(gDF) <- c('x', 'myFill', 'gridVal_X', 'gridVal_Y', 'y')")
r("p <- ggplot(gDF, aes(x=x, y=y, fill=myFill))")
r("p <- p + geom_bar(stat='identity', position='stack')")
if gridVal_X != 'None' and gridVal_Y == 'None':
r("p <- p + facet_grid(. ~ gridVal_X)")
r("p <- p + theme(strip.text.x=element_text(size=10, colour='blue', angle=0))")
elif gridVal_X == 'None' and gridVal_Y != 'None':
r("p <- p + facet_grid(gridVal_Y ~ .)")
r("p <- p + theme(strip.text.y=element_text(size=10, colour='blue', angle=90))")
elif gridVal_X != 'None' and gridVal_Y != 'None':
r("p <- p + facet_grid(gridVal_Y ~ gridVal_X)")
r("p <- p + theme(strip.text.x=element_text(size=10, colour='blue', angle=0))")
r("p <- p + theme(strip.text.y=element_text(size=10, colour='blue', angle=90))")
r("p <- p + theme(strip.text.x=element_text(size=10, colour='blue', angle=0))")
r("p <- p + theme(strip.text.y=element_text(size=10, colour='blue', angle=90))")
palette = all['palette']
r.assign('palette', palette)
if palette == 'gdocs':
r('pal <- gdocs_pal()(20)')
elif palette == 'hc':
r('pal <- hc_pal()(10)')
elif palette == 'Set1':
r('pal <- brewer.pal(8, "Set1")')
elif palette == 'Set2':
r('pal <- brewer.pal(8, "Set2")')
elif palette == 'Set3':
r('pal <- brewer.pal(12, "Set3")')
elif palette == 'Paired':
r('pal <- brewer.pal(12, "Paired")')
elif palette == 'Dark2':
r('pal <- brewer.pal(12, "Dark2")')
elif palette == 'Accent':
r('pal <- brewer.pal(12, "Accent")')
r('nColors <- length(pal)')
r("p <- p + scale_fill_manual(values=rep(pal, ceiling(nlevels(gDF$myFill)/nColors)))")
r("p <- p + theme(legend.text=element_text(size=7))")
r("p <- p + theme(axis.text.x=element_text(angle=45, vjust=1, hjust=1))")
r("p <- p + theme(legend.title=element_blank())")
r("p <- p + theme(legend.position='bottom')")
r("p <- p + guides(fill=guide_legend(ncol=8))")
if DepVar == 0:
r("p <- p + ylab('Abundance') + xlab('')")
elif DepVar == 1:
r("p <- p + ylab('Relative Abundance') + xlab('')")
elif DepVar == 2:
r("p <- p + ylab('OTU Richness') + xlab('')")
elif DepVar == 3:
r("p <- p + ylab('OTU Diversity') + xlab('')")
elif DepVar == 4:
r("p <- p + ylab('Total Abundance') + xlab('')")
path = "myPhyloDB/media/temp/anova/Rplots"
if not os.path.exists(path):
os.makedirs(path)
r.assign("path", path)
r.assign("RID", RID)
r("file <- paste(path, '/', RID, '.anova.pdf', sep='')")
r("p <- set_panel_size(p, height=unit(2.9, 'in'), width=unit(2.9, 'in'))")
r("nlev <- nlevels(as.factor(gDF$gridVal_X))")
r('if (nlev == 0) { \
myWidth <- 11 \
} else { \
myWidth <- 3*nlev+4 \
}')
r("nlev <- nlevels(as.factor(gDF$gridVal_Y))")
r("nRow <- ceiling(nlevels(gDF$myFill)/8)")
r('if (nlev == 0) { \
myHeight <- 8 \
} else { \
myHeight <- 2+(3*nlev)+(1*nRow) \
}')
r("ggsave(filename=file, plot=p, units='in', height=myHeight, width=myWidth, limitsize=F)")
# group DataFrame by each taxa level selected
grouped1 = finalDF.groupby(['rank_name', 'rank_id'])
pValDict = {}
counter = 1
for name1, group1 in grouped1:
D = ''
r.assign("df", group1)
trtString = " * ".join(allFields)
if DepVar == 0:
anova_string = "fit <- aov(df$abund ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 1:
anova_string = "fit <- aov(df$rel_abund ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 2:
anova_string = "fit <- aov(df$rich ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 3:
anova_string = "fit <- aov(df$diversity ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 4:
anova_string = "fit <- aov(df$abund_16S ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
aov = r("summary(fit)")
pString = r("summary(fit)[[1]][['Pr(>F)']]")
tempStuff = pString.split(' ')
pList = []
for part in tempStuff:
try:
pList.append(float(part))
except Exception:
pass
if pList:
p_val = min(pList)
tempStuff = aov.split('\n')
for part in tempStuff:
if part != tempStuff[0]:
D += part + '\n'
fList = []
for part in tempStuff:
if part != tempStuff[0] and part != tempStuff[1]:
part = part.replace('\r', '')
part1 = part.split(' ')
if part1[0] == 'Residuals':
break
if part1[0] not in fList:
fList.append(part1[0])
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
D += "\nLSmeans & Tukey's HSD post-hoc test:\n\n"
if len(quantFields) == 0:
for i in fList:
hsd_string = "lsm <- lsmeans(fit, list(pairwise ~ " + str(i) + "))"
r.assign("cmd", hsd_string)
r("eval(parse(text=cmd))")
r("options(width=5000)")
table = r("lsm")
tempStuff = table.split('\n')
for i in xrange(len(tempStuff)):
if i > 0:
D += tempStuff[i] + '\n'
else:
for i in fList:
if i not in quantFields:
hsd_string = "lsm <- lsmeans(fit, list(pairwise ~ " + str(i) + "))"
r.assign("cmd", hsd_string)
r("eval(parse(text=cmd))")
r("options(width=5000)")
table = r("lsm")
tempStuff = table.split('\n')
for i in xrange(len(tempStuff)):
if i > 0:
D += tempStuff[i] + '\n'
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
else:
p_val = 1.0
                        D = 'ANOVA cannot be performed; please check that you have more than one treatment level and appropriate replication.\n'
pValDict[name1] = p_val
result += 'Name: ' + str(name1[0]) + '\n'
result += 'ID: ' + str(name1[1]) + '\n'
if DepVar == 0:
result += 'Dependent Variable: Abundance' + '\n'
elif DepVar == 1:
result += 'Dependent Variable: Relative Abundance' + '\n'
elif DepVar == 2:
result += 'Dependent Variable: OTU Richness' + '\n'
elif DepVar == 3:
result += 'Dependent Variable: OTU Diversity' + '\n'
elif DepVar == 4:
result += 'Dependent Variable: Total Abundance' + '\n'
result += '\nANCOVA table:\n'
D = D.decode('utf-8')
result += D + '\n'
result += '===============================================\n'
result += '\n\n\n\n'
taxa_no = len(grouped1)
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...taxa ' + str(counter) + ' of ' + str(taxa_no) + ' is complete!')
counter += 1
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...done!')
# END STATS
# START GRAPH
functions.setBase(RID, 'Step 4 of 4: Formatting graph data for display...')
grouped1 = finalDF.groupby(['rank_name', 'rank_id'])
for name1, group1 in grouped1:
dataList = []
errorList = []
pValue = pValDict[name1]
if sig_only == 0:
if DepVar == 0:
mean = group1.groupby(catFields)['abund'].mean()
se = group1.groupby(catFields)['abund'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 1:
mean = group1.groupby(catFields)['rel_abund'].mean()
se = group1.groupby(catFields)['rel_abund'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 2:
mean = group1.groupby(catFields)['rich'].mean()
se = group1.groupby(catFields)['rich'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 3:
mean = group1.groupby(catFields)['diversity'].mean()
se = group1.groupby(catFields)['diversity'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 4:
mean = group1.groupby(catFields)['abund_16S'].mean()
se = group1.groupby(catFields)['abund_16S'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
seriesDict = {}
seriesDict['name'] = name1
seriesDict['type'] = 'column'
seriesDict['data'] = dataList
seriesList.append(seriesDict)
seriesDict = {}
seriesDict['name'] = name1
seriesDict['type'] = 'errorbar'
seriesDict['visible'] = False
seriesDict['data'] = errorList
seriesList.append(seriesDict)
elif sig_only == 1:
if pValue < 0.05:
if DepVar == 0:
mean = group1.groupby(catFields)['abund'].mean()
se = group1.groupby(catFields)['abund'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 1:
mean = group1.groupby(catFields)['rel_abund'].mean()
se = group1.groupby(catFields)['rel_abund'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 2:
mean = group1.groupby(catFields)['rich'].mean()
se = group1.groupby(catFields)['rich'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 3:
mean = group1.groupby(catFields)['diversity'].mean()
se = group1.groupby(catFields)['diversity'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
elif DepVar == 4:
mean = group1.groupby(catFields)['abund_16S'].mean()
se = group1.groupby(catFields)['abund_16S'].std()
se.fillna(0, inplace=True)
high = [x + y for x, y in zip(mean, se)]
low = [x - y for x, y in zip(mean, se)]
dataList = list(mean)
errorTuple = zip(low, high)
errorList = [list(elem) for elem in errorTuple]
seriesDict = {}
seriesDict['name'] = name1
seriesDict['type'] = 'column'
seriesDict['data'] = dataList
seriesList.append(seriesDict)
seriesDict = {}
seriesDict['name'] = name1
seriesDict['type'] = 'errorbar'
seriesDict['visible'] = False
seriesDict['data'] = errorList
seriesList.append(seriesDict)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
'''
catFieldsList = []
for i in catFields:
catFieldsList.append(len(group1.groupby(i)))
catFields = [x for (y, x) in sorted(zip(catFieldsList, catFields))]
'''
if DepVar == 0:
grouped2 = group1.groupby(catFields)['abund'].mean()
elif DepVar == 1:
grouped2 = group1.groupby(catFields)['rel_abund'].mean()
elif DepVar == 4:
grouped2 = group1.groupby(catFields)['abund_16S'].mean()
elif DepVar == 2:
grouped2 = group1.groupby(catFields)['rich'].mean()
elif DepVar == 3:
grouped2 = group1.groupby(catFields)['diversity'].mean()
else:
raise Exception("Something went horribly wrong")
if catFields.__len__() == 1:
xAxisDict['categories'] = grouped2.index.values.tolist()
else:
g2indexvals = grouped2.index.values
level = g2indexvals[0].__len__()
labelTree = recLabels(g2indexvals, level)
xAxisDict['categories'] = labelTree['categories']
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
yTitle = {}
if DepVar == 0:
yTitle['text'] = 'Abundance'
elif DepVar == 1:
yTitle['text'] = 'Relative Abundance'
elif DepVar == 2:
yTitle['text'] = 'OTU Richness'
elif DepVar == 3:
yTitle['text'] = 'OTU Diversity'
elif DepVar == 4:
yTitle['text'] = 'Total Abundance'
yTitle['style'] = {'fontSize': '18px', 'fontWeight': 'bold'}
if transform != 0:
tname = {
'1': "Ln", '2': "Log10", '3': "Sqrt", '4': "Logit", '5': "Arcsin"
}
yTitle['text'] = tname[str(transform)] + "(" + str(yTitle['text']) + ")"
yAxisDict['title'] = yTitle
xStyleDict = {'style': {'fontSize': '14px'}, 'rotation': 0}
xAxisDict['labels'] = xStyleDict
yStyleDict = {'style': {'fontSize': '14px'}}
yAxisDict['labels'] = yStyleDict
finalDict['series'] = seriesList
finalDict['xAxis'] = xAxisDict
finalDict['yAxis'] = yAxisDict
finalDict['text'] = result
if not seriesList:
finalDict['empty'] = 0
else:
finalDict['empty'] = 1
functions.setBase(RID, 'Step 4 of 4: Formatting graph data for display...done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# datatable of taxa mapped to selected kegg orthologies
if not treeType == 1 and mapTaxa == 'yes':
myDir = 'myPhyloDB/media/temp/anova/'
fileName = str(myDir) + 'Mapped_Taxa.csv'
allDF.to_csv(fileName)
myDir = 'myPhyloDB/media/temp/anova/'
fileName2 = str(myDir) + 'Mapped_Taxa.gz'
zf = zipfile.ZipFile(fileName2, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
zf.write(fileName, 'Mapped_Taxa.csv')
zf.close()
finalDict['resType'] = 'res'
finalDict['error'] = 'none'
res = json.dumps(finalDict)
return HttpResponse(res, content_type='application/json')
except Exception as e:
if not stops[PID] == RID:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
myDate = "\nDate: " + str(datetime.datetime.now()) + "\n"
logging.exception(myDate)
myDict = {}
myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now())
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
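# recLabels()/makeLabels() below convert the ordered tuples of a pandas
# MultiIndex (one tuple per group) into the nested
# {'name': ..., 'categories': [...]} structure used for grouped x-axis categories.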
def recLabels(lists, level):
if lists.__len__() == 0:
return {}
first = lists
splitset = []
for i in range(0, level):
children = []
parents = []
for set in first:
children.append(set[set.__len__()-1])
parents.append(set[0:set.__len__()-1])
first = parents
splitset.append(children)
return makeLabels(" ", splitset)
def makeLabels(name, list):
retDict = {}
if list.__len__() == 1:
# final layer
retDict['name'] = name
retDict['categories'] = list[0]
return retDict
    # recursive case: split the outermost level into runs of identical labels
    # and recurse on the finer levels for each run
children = []
first = list[list.__len__()-1][0]
iter = 0
start = 0
for stuff in list[list.__len__()-1]:
if stuff != first:
sublist = []
for otherstuff in list[0:list.__len__()-1]:
sublist.append(otherstuff[start:iter])
children.append(makeLabels(first, sublist))
first = stuff
start = iter
iter += 1
    # flush the final group once the loop has finished
sublist = []
for otherstuff in list[0:list.__len__()-1]:
sublist.append(otherstuff[start:iter])
children.append(makeLabels(first, sublist))
retDict['name'] = name
retDict['categories'] = children
return retDict
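# Illustrative example (hypothetical index values): with two categorical fields,
#   recLabels([('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y')], 2)['categories']
# evaluates to
#   [{'name': 'A', 'categories': ['x', 'y']}, {'name': 'B', 'categories': ['x', 'y']}]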
def getQuantUnivData(request, RID, stops, PID):
try:
while True:
if request.is_ajax():
# Get variables from web page
allJson = request.body.split('&')[0]
all = json.loads(allJson)
functions.setBase(RID, 'Step 1 of 4: Selecting your chosen meta-variables...')
selectAll = int(all["selectAll"])
keggAll = int(all["keggAll"])
nzAll = int(all["nzAll"])
sig_only = int(all["sig_only"])
# Select samples and meta-variables from savedDF
metaValsCat = all['metaValsCat']
metaIDsCat = all['metaIDsCat']
metaValsQuant = all['metaValsQuant']
metaIDsQuant = all['metaIDsQuant']
treeType = int(all['treeType'])
DepVar = int(all["DepVar"])
# Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections
savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar, levelDep=True)
allFields = catFields + quantFields
if not finalSampleIDs:
error = "No valid samples were contained in your final dataset.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
result = ''
result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n'
result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n'
result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n'
result += '===============================================\n\n'
functions.setBase(RID, 'Step 1 of 4: Selecting your chosen meta-variables...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
functions.setBase(RID, 'Step 2 of 4: Selecting your chosen taxa or kegg level...')
# filter otus based on user settings
remUnclass = all['remUnclass']
remZeroes = all['remZeroes']
perZeroes = int(all['perZeroes'])
filterData = all['filterData']
filterPer = int(all['filterPer'])
filterMeth = int(all['filterMeth'])
mapTaxa = all['map_taxa']
finalDF = pd.DataFrame()
allDF = pd.DataFrame()
if treeType == 1:
if selectAll == 0 or selectAll == 8:
taxaString = all["taxa"]
taxaDict = json.JSONDecoder(object_pairs_hook=functions.multidict).decode(taxaString)
filteredDF = savedDF.copy()
else:
taxaDict = ''
filteredDF = functions.filterDF(savedDF, DepVar, selectAll, remUnclass, remZeroes, perZeroes, filterData, filterPer, filterMeth)
finalDF, missingList = functions.getTaxaDF(selectAll, taxaDict, filteredDF, metaDF, allFields, DepVar, RID, stops, PID)
if selectAll == 8:
result += '\nThe following PGPRs were not detected: ' + ", ".join(missingList) + '\n'
result += '===============================================\n'
if treeType == 2:
keggDict = ''
if keggAll == 0:
keggString = all["kegg"]
keggDict = json.JSONDecoder(object_pairs_hook=functions.multidict).decode(keggString)
finalDF, allDF = functions.getKeggDF(keggAll, keggDict, savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if treeType == 3:
keggDict = ''
if nzAll == 0:
keggString = all["nz"]
keggDict = json.JSONDecoder(object_pairs_hook=functions.multidict).decode(keggString)
finalDF, allDF = functions.getNZDF(nzAll, keggDict, savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
# make sure column types are correct
finalDF[catFields] = finalDF[catFields].astype(str)
finalDF[quantFields] = finalDF[quantFields].astype(float)
# transform Y, if requested
transform = int(all["transform"])
finalDF = functions.transformDF(transform, DepVar, finalDF)
# save location info to session
myDir = 'myPhyloDB/media/temp/anova/'
if not os.path.exists(myDir):
os.makedirs(myDir)
path = str(myDir) + str(RID) + '.biom'
functions.imploding_panda(path, treeType, DepVar, finalSampleIDs, metaDF, finalDF)
functions.setBase(RID, 'Step 2 of 4: Selecting your chosen taxa or KEGG level...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...!')
finalDict = {}
# group DataFrame by each taxa level selected
shapes = ['circle', 'square', 'triangle', 'triangle-down', 'diamond']
if os.name == 'nt':
r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True)
else:
r = R(RCMD="R/R-Linux/bin/R", use_pandas=True)
functions.setBase(RID, 'Verifying R packages...missing packages are being installed')
# R packages from cran
r("list.of.packages <- c('ggplot2', 'RColorBrewer', 'ggthemes')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
print r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)")
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...')
print r("library(ggplot2)")
print r("library(ggthemes)")
print r("library(RColorBrewer)")
print r('source("R/myFunctions/myFunctions.R")')
# R graph
r.assign('finalDF', finalDF)
colorVal = all['colorVal']
if colorVal == 'None':
r("colorTrt <- c('All')")
else:
r.assign("colorVal", colorVal)
r("colorTrt <- as.factor(finalDF[,paste(colorVal)])")
r.assign('xVal', quantFields[0])
gridVal_X = all['gridVal_X']
if gridVal_X == 'None':
r("gridTrt_X <- c('All')")
else:
r.assign("gridVal_X", gridVal_X)
r("gridTrt_X <- as.factor(finalDF[,paste(gridVal_X)])")
gridVal_Y = all['gridVal_Y']
if gridVal_Y == 'None':
r("gridTrt_Y <- c('All')")
else:
r.assign("gridVal_Y", gridVal_Y)
r("gridTrt_Y <- as.factor(finalDF[,paste(gridVal_Y)])")
shapeVal = all['shapeVal']
if shapeVal == 'None':
r("shapeTrt <- c('All')")
else:
r.assign("shapeVal", shapeVal)
r("shapeTrt <- as.factor(finalDF[,paste(shapeVal)])")
if DepVar == 0:
r('DepVar <- "abund"')
elif DepVar == 1:
r('DepVar <- "rel_abund"')
elif DepVar == 2:
r('DepVar <- "rich"')
elif DepVar == 3:
r('DepVar <- "diversity"')
elif DepVar == 4:
r('DepVar <- "abund_16S"')
r("gDF <- data.frame(x=finalDF[,paste(xVal)], y=finalDF[,paste(DepVar)], \
gridVal_X=gridTrt_X, gridVal_Y=gridTrt_Y, \
myColor=colorTrt, myShape=shapeTrt)")
r("p <- ggplot(gDF, aes(x=x, y=y, fill=factor(myColor), shape=factor(myShape)) )")
r("p <- p + geom_point(size=4)")
if gridVal_X != 'None' and gridVal_Y == 'None':
r("p <- p + facet_grid(. ~ gridVal_X)")
elif gridVal_X == 'None' and gridVal_Y != 'None':
r("p <- p + facet_grid(gridVal_Y ~ .)")
elif gridVal_X != 'None' and gridVal_Y != 'None':
r("p <- p + facet_grid(gridVal_Y ~ gridVal_X)")
r("p <- p + theme(strip.text.x=element_text(size=10, colour='blue', angle=0))")
r("p <- p + theme(strip.text.y=element_text(size=10, colour='blue', angle=90))")
palette = all['palette']
r.assign('palette', palette)
if palette == 'gdocs':
r('pal <- gdocs_pal()(20)')
elif palette == 'hc':
r('pal <- hc_pal()(10)')
elif palette == 'Set1':
r('pal <- brewer.pal(8, "Set1")')
elif palette == 'Set2':
r('pal <- brewer.pal(8, "Set2")')
elif palette == 'Set3':
r('pal <- brewer.pal(12, "Set3")')
elif palette == 'Paired':
r('pal <- brewer.pal(12, "Paired")')
elif palette == 'Dark2':
r('pal <- brewer.pal(12, "Dark2")')
elif palette == 'Accent':
r('pal <- brewer.pal(12, "Accent")')
r('nColors <- length(pal)')
r('number <- nlevels(gDF$myColor)')
r('colors <- rep(pal, length.out=number) ')
r("p <- p + scale_fill_manual(name='', values=colors, guide=guide_legend(override.aes=list(shape=21)))")
r('number <- nlevels(gDF$myShape)')
r('shapes <- rep(c(21, 22, 23, 24, 25), length.out=number) ')
r("p <- p + scale_shape_manual(name='', values=shapes)")
r("p <- p + theme(legend.text=element_text(size=7))")
r("p <- p + theme(legend.position='bottom')")
r("my.formula <- y ~ x")
r("p <- p + geom_smooth(method='lm', se=T, color='black', formula=my.formula)")
if DepVar == 0:
r("p <- p + ylab('Abundance') + xlab(paste(xVal))")
elif DepVar == 1:
r("p <- p + ylab('Relative Abundance') + xlab(paste(xVal))")
elif DepVar == 2:
r("p <- p + ylab('OTU Richness') + xlab(paste(xVal))")
elif DepVar == 3:
r("p <- p + ylab('OTU Diversity') + xlab(paste(xVal))")
elif DepVar == 4:
r("p <- p + ylab('Total Abundance') + xlab(paste(xVal))")
path = "myPhyloDB/media/temp/anova/Rplots"
if not os.path.exists(path):
os.makedirs(path)
r.assign("path", path)
r.assign("RID", RID)
r("file <- paste(path, '/', RID, '.anova.pdf', sep='')")
r("p <- set_panel_size(p, height=unit(2.9, 'in'), width=unit(2.9, 'in'))")
r("nlev <- nlevels(as.factor(gDF$gridVal_X))")
r('if (nlev == 0) { \
myWidth <- 8 \
} else { \
myWidth <- min(3*nlev+4, 50) \
}')
r("nlev <- nlevels(as.factor(gDF$gridVal_Y))")
r('if (nlev == 0) { \
myHeight <- 8 \
} else { \
myHeight <- min(3*nlev+4, 50) \
}')
r("ggsave(filename=file, plot=p, units='in', height=myHeight, width=myWidth, limitsize=F)")
pValDict = {}
counter = 1
catLevels = len(set(catValues))
grouped1 = finalDF.groupby(['rank_name', 'rank_id'])
for name1, group1 in grouped1:
D = ''
r.assign("df", group1)
trtString = " * ".join(allFields)
if DepVar == 0:
anova_string = "fit <- lm(abund ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 1:
anova_string = "fit <- lm(rel_abund ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 2:
anova_string = "fit <- lm(rich ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 3:
anova_string = "fit <- lm(diversity ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
elif DepVar == 4:
anova_string = "fit <- lm(abund_16S ~ " + str(trtString) + ", data=df)"
r.assign("cmd", anova_string)
r("eval(parse(text=cmd))")
# calculate predicted scores (full model)
pred_string = "df$pred <- predict(fit, df)"
r.assign("cmd", pred_string)
r("eval(parse(text=cmd))")
r("options(width=5000)")
r("aov <- anova(fit)")
aov = r("aov")
tempStuff = aov.split('\n')
for i in xrange(len(tempStuff)):
if i >= 4:
D += tempStuff[i] + '\n'
fit = r("summary(fit)")
tempStuff = fit.split('\n')
for i in xrange(len(tempStuff)):
if i >= 8:
D += tempStuff[i] + '\n'
r("p_vals <- summary(fit)$coefficients[,4]")
p_vals = r.get("p_vals")
if not np.isnan(p_vals).any():
p_value = min(p_vals)
pValDict[name1] = p_value
else:
pValDict[name1] = np.nan
result += 'Name: ' + str(name1[0]) + '\n'
result += 'ID: ' + str(name1[1]) + '\n'
if DepVar == 0:
result += 'Dependent Variable: Abundance' + '\n'
elif DepVar == 1:
result += 'Dependent Variable: Relative Abundance' + '\n'
elif DepVar == 2:
result += 'Dependent Variable: OTU Richness' + '\n'
elif DepVar == 3:
result += 'Dependent Variable: OTU Diversity' + '\n'
elif DepVar == 4:
result += 'Dependent Variable: Total Abundance' + '\n'
result += '\nANCOVA table:\n'
D = D.decode('utf-8')
result += D + '\n'
result += '===============================================\n'
result += '\n\n\n\n'
taxa_no = len(grouped1)
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...taxa ' + str(counter) + ' of ' + str(taxa_no) + ' is complete!')
counter += 1
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
functions.setBase(RID, 'Step 3 of 4: Performing statistical test...done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
functions.setBase(RID, 'Step 4 of 4: Formatting graph data for display...')
finalDF['sample_name'] = ''
for index, row in finalDF.iterrows():
val = Sample.objects.get(sampleid=row['sampleid']).sample_name
finalDF.loc[index, 'sample_name'] = val
shapes_idx = 0
seriesList = []
grouped1 = finalDF.groupby(['rank_name', 'rank_id'])
for name1, group1 in grouped1:
pValue = pValDict[name1]
if sig_only == 0:
if catLevels > 1:
grouped2 = group1.groupby(catFields)
for name2, group2 in grouped2:
dataList = []
x = []
y = []
if DepVar == 0:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['abund'].values.astype(float).tolist()
elif DepVar == 1:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['rel_abund'].values.astype(float).tolist()
elif DepVar == 2:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['rich'].values.astype(float).tolist()
elif DepVar == 3:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['diversity'].values.astype(float).tolist()
elif DepVar == 4:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['abund_16S'].values.astype(float).tolist()
if DepVar == 0:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund'])
dataList.append(dataDict)
elif DepVar == 1:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rel_abund'])
dataList.append(dataDict)
elif DepVar == 2:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rich'])
dataList.append(dataDict)
elif DepVar == 3:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['diversity'])
dataList.append(dataDict)
elif DepVar == 4:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund_16S'])
dataList.append(dataDict)
seriesDict = {}
seriesDict['turboThreshold'] = 0
seriesDict['type'] = 'scatter'
seriesDict['name'] = str(name1[0]) + ": " + str(name2)
seriesDict['data'] = dataList
markerDict = {}
markerDict['symbol'] = shapes[shapes_idx]
seriesDict['marker'] = markerDict
seriesList.append(seriesDict)
slp, inter, r_value, p, std_err = stats.linregress(x, y)
min_y = float(slp*min(x) + inter)
max_y = float(slp*max(x) + inter)
slope = "%0.3f" % slp
intercept = "%0.3f" % inter
r_sq = r_value * r_value
r_square = "%0.3f" % r_sq
regrList = []
regrList.append([float(min(x)), min_y])
regrList.append([float(max(x)), max_y])
regrDict = {}
regrDict['type'] = 'line'
sup2 = u"\u00B2"
regrDict['name'] = 'y = ' + str(slope) + 'x' + ' + ' + str(intercept) + '; R' + sup2 + ' = ' + str(r_square)
regrDict['data'] = regrList
regrDict['color'] = 'black'
markerDict = {}
markerDict['enabled'] = False
regrDict['marker'] = markerDict
seriesList.append(regrDict)
shapes_idx += 1
if shapes_idx >= len(shapes):
shapes_idx = 0
else: # if catLevel <=1
dataList = []
x = []
y = []
if DepVar == 0:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['abund'].values.astype(float).tolist()
elif DepVar == 1:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['rel_abund'].values.astype(float).tolist()
elif DepVar == 2:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['rich'].values.astype(float).tolist()
elif DepVar == 3:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['diversity'].values.astype(float).tolist()
elif DepVar == 4:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['abund_16S'].values.astype(float).tolist()
if DepVar == 0:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund'])
dataList.append(dataDict)
elif DepVar == 1:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rel_abund'])
dataList.append(dataDict)
elif DepVar == 2:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rich'])
dataList.append(dataDict)
elif DepVar == 3:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['diversity'])
dataList.append(dataDict)
elif DepVar == 4:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund_16S'])
dataList.append(dataDict)
seriesDict = {}
seriesDict['turboThreshold'] = 0
seriesDict['type'] = 'scatter'
seriesDict['name'] = str(name1[0])
seriesDict['data'] = dataList
markerDict = {}
markerDict['symbol'] = shapes[shapes_idx]
seriesDict['marker'] = markerDict
seriesList.append(seriesDict)
slp, inter, r_value, p, std_err = stats.linregress(x, y)
min_y = float(slp*min(x) + inter)
max_y = float(slp*max(x) + inter)
slope = "%0.3f" % slp
intercept = "%0.3f" % inter
r_sq = r_value * r_value
r_square = "%0.3f" % r_sq
regrList = []
regrList.append([float(min(x)), min_y])
regrList.append([float(max(x)), max_y])
regrDict = {}
regrDict['type'] = 'line'
sup2 = u"\u00B2"
regrDict['name'] = 'y = ' + str(slope) + 'x' + ' + ' + str(intercept) + '; R' + sup2 + ' = ' + str(r_square)
regrDict['data'] = regrList
regrDict['color'] = 'black'
markerDict = {}
markerDict['enabled'] = False
regrDict['marker'] = markerDict
seriesList.append(regrDict)
elif sig_only == 1:
if pValue < 0.05:
if catLevels > 1:
grouped2 = group1.groupby(catFields)
for name2, group2 in grouped2:
dataList = []
x = []
y = []
if DepVar == 0:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['abund'].values.astype(float).tolist()
elif DepVar == 1:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['rel_abund'].values.astype(float).tolist()
elif DepVar == 2:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['rich'].values.astype(float).tolist()
elif DepVar == 3:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['diversity'].values.astype(float).tolist()
elif DepVar == 4:
x = group2[quantFields[0]].values.astype(float).tolist()
y = group2['abund_16S'].values.astype(float).tolist()
if DepVar == 0:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund'])
dataList.append(dataDict)
elif DepVar == 1:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rel_abund'])
dataList.append(dataDict)
elif DepVar == 2:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rich'])
dataList.append(dataDict)
elif DepVar == 3:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['diversity'])
dataList.append(dataDict)
elif DepVar == 4:
for index, row in group2.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund_16S'])
dataList.append(dataDict)
seriesDict = {}
seriesDict['turboThreshold'] = 0
seriesDict['type'] = 'scatter'
seriesDict['name'] = str(name1[0]) + ": " + str(name2)
seriesDict['data'] = dataList
markerDict = {}
markerDict['symbol'] = shapes[shapes_idx]
seriesDict['marker'] = markerDict
seriesList.append(seriesDict)
slp, inter, r_value, p, std_err = stats.linregress(x, y)
min_y = float(slp*min(x) + inter)
max_y = float(slp*max(x) + inter)
slope = "%0.3f" % slp
intercept = "%0.3f" % inter
r_sq = r_value * r_value
r_square = "%0.3f" % r_sq
regrList = []
regrList.append([float(min(x)), min_y])
regrList.append([float(max(x)), max_y])
regrDict = {}
regrDict['type'] = 'line'
regrDict['name'] = 'y = ' + str(slope) + 'x' + ' + ' + str(intercept) + '; R2 = ' + str(r_square)
regrDict['data'] = regrList
regrDict['color'] = 'black'
markerDict = {}
markerDict['enabled'] = False
regrDict['marker'] = markerDict
seriesList.append(regrDict)
shapes_idx += 1
if shapes_idx >= len(shapes):
shapes_idx = 0
else: # if catLevel <=1
dataList = []
x = []
y = []
if DepVar == 0:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['abund'].values.astype(float).tolist()
elif DepVar == 1:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['rel_abund'].values.astype(float).tolist()
elif DepVar == 2:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['rich'].values.astype(float).tolist()
elif DepVar == 3:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['diversity'].values.astype(float).tolist()
elif DepVar == 4:
x = group1[quantFields[0]].values.astype(float).tolist()
y = group1['abund_16S'].values.astype(float).tolist()
if DepVar == 0:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund'])
dataList.append(dataDict)
elif DepVar == 1:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rel_abund'])
dataList.append(dataDict)
elif DepVar == 2:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['rich'])
dataList.append(dataDict)
elif DepVar == 3:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['diversity'])
dataList.append(dataDict)
elif DepVar == 4:
for index, row in group1.iterrows():
dataDict = {}
dataDict['name'] = row['sample_name']
dataDict['x'] = float(row[quantFields[0]])
dataDict['y'] = float(row['abund_16S'])
dataList.append(dataDict)
seriesDict = {}
seriesDict['turboThreshold'] = 0
seriesDict['type'] = 'scatter'
seriesDict['name'] = str(name1[0])
seriesDict['data'] = dataList
markerDict = {}
markerDict['symbol'] = shapes[shapes_idx]
seriesDict['marker'] = markerDict
seriesList.append(seriesDict)
slp, inter, r_value, p, std_err = stats.linregress(x, y)
min_y = float(slp*min(x) + inter)
max_y = float(slp*max(x) + inter)
slope = "%0.3f" % slp
intercept = "%0.3f" % inter
r_sq = r_value * r_value
r_square = "%0.3f" % r_sq
regrList = []
regrList.append([float(min(x)), min_y])
regrList.append([float(max(x)), max_y])
regrDict = {}
regrDict['type'] = 'line'
regrDict['name'] = 'y = ' + str(slope) + 'x' + ' + ' + str(intercept) + '; R2 = ' + str(r_square)
regrDict['data'] = regrList
regrDict['color'] = 'black'
markerDict = {}
markerDict['enabled'] = False
regrDict['marker'] = markerDict
seriesList.append(regrDict)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\ #
xAxisDict = {}
xTitle = {}
xTitle['text'] = quantFields[0]
xTitle['style'] = {'fontSize': '18px', 'fontWeight': 'bold'}
xAxisDict['title'] = xTitle
yAxisDict = {}
yTitle = {}
if DepVar == 0:
yTitle['text'] = 'Abundance'
elif DepVar == 1:
yTitle['text'] = 'Relative Abundance'
elif DepVar == 2:
yTitle['text'] = 'OTU Richness'
elif DepVar == 3:
yTitle['text'] = 'OTU Diversity'
elif DepVar == 4:
yTitle['text'] = 'Total Abundance'
yAxisDict['title'] = yTitle
if transform != 0:
tname = {
'1': "Ln", '2': "Log10", '3': "Sqrt", '4': "Logit", '5': "Arcsin"
}
yTitle['text'] = tname[str(transform)] + "(" + str(yTitle['text']) + ")"
yTitle['style'] = {'fontSize': '18px', 'fontWeight': 'bold'}
yAxisDict['title'] = yTitle
styleDict = {'style': {'fontSize': '14px'}}
xAxisDict['labels'] = styleDict
yAxisDict['labels'] = styleDict
finalDict['series'] = seriesList
finalDict['xAxis'] = xAxisDict
finalDict['yAxis'] = yAxisDict
if not seriesList:
finalDict['empty'] = 0
else:
finalDict['empty'] = 1
functions.setBase(RID, 'Step 4 of 4: Formatting graph data for display...done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
# datatable of taxa mapped to selected kegg orthologies
if not treeType == 1 and mapTaxa == 'yes':
records = allDF.values.tolist()
finalDict['taxData'] = json.dumps(records)
columns = allDF.columns.values.tolist()
finalDict['taxColumns'] = json.dumps(columns)
finalDict['resType'] = 'res'
finalDict['text'] = result
finalDict['error'] = 'none'
res = json.dumps(finalDict)
return HttpResponse(res, content_type='application/json')
except Exception as e:
if not stops[PID] == RID:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
myDate = "\nDate: " + str(datetime.datetime.now()) + "\n"
logging.exception(myDate)
myDict = {}
myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now())
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.22/_downloads/f553cfe58e70a6f3fbc4e55fbf53ba56/plot_time_frequency_erds.py | 5 | 5389 | """
===============================
Compute and visualize ERDS maps
===============================
This example calculates and displays ERDS maps of event-related EEG data. ERDS
(sometimes also written as ERD/ERS) is short for event-related
desynchronization (ERD) and event-related synchronization (ERS) [1]_.
Conceptually, ERD corresponds to a decrease in power in a specific frequency
band relative to a baseline. Similarly, ERS corresponds to an increase in
power. An ERDS map is a time/frequency representation of ERD/ERS over a range
of frequencies [2]_. ERDS maps are also known as ERSP (event-related spectral
perturbation) [3]_.
We use a public EEG BCI data set containing two different motor imagery tasks
available at PhysioNet. The two tasks are imagined hand and feet movement. Our
goal is to generate ERDS maps for each of the two tasks.
First, we load the data and create epochs of 5s length. The data sets contain
multiple channels, but we will only consider the three channels C3, Cz, and C4.
We compute maps containing frequencies ranging from 2 to 35Hz. We map ERD to
red color and ERS to blue color, which is the convention in many ERDS
publications. Finally, we perform cluster-based permutation tests to estimate
significant ERDS values (corrected for multiple comparisons within channels).
References
----------
.. [1] G. Pfurtscheller, F. H. Lopes da Silva. Event-related EEG/MEG
synchronization and desynchronization: basic principles. Clinical
Neurophysiology 110(11), 1842-1857, 1999.
.. [2] B. Graimann, J. E. Huggins, S. P. Levine, G. Pfurtscheller.
Visualization of significant ERD/ERS patterns in multichannel EEG and
ECoG data. Clinical Neurophysiology 113(1), 43-47, 2002.
.. [3] S. Makeig. Auditory event-related dynamics of the EEG spectrum and
effects of exposure to tones. Electroencephalography and Clinical
Neurophysiology 86(4), 283-293, 1993.
"""
# Authors: Clemens Brunner <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import eegbci
from mne.io import concatenate_raws, read_raw_edf
from mne.time_frequency import tfr_multitaper
from mne.stats import permutation_cluster_1samp_test as pcluster_test
from mne.viz.utils import center_cmap
# load and preprocess data ####################################################
subject = 1 # use data from subject 1
runs = [6, 10, 14] # use only hand and feet motor imagery runs
fnames = eegbci.load_data(subject, runs)
raws = [read_raw_edf(f, preload=True) for f in fnames]
raw = concatenate_raws(raws)
raw.rename_channels(lambda x: x.strip('.')) # remove dots from channel names
events, _ = mne.events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = mne.pick_channels(raw.info["ch_names"], ["C3", "Cz", "C4"])
# epoch data ##################################################################
tmin, tmax = -1, 4 # define epochs around events (in s)
event_ids = dict(hands=2, feet=3) # map event IDs to tasks
epochs = mne.Epochs(raw, events, event_ids, tmin - 0.5, tmax + 0.5,
picks=picks, baseline=None, preload=True)
# compute ERDS maps ###########################################################
freqs = np.arange(2, 36, 1) # frequencies from 2-35Hz
n_cycles = freqs # use constant t/f resolution
vmin, vmax = -1, 1.5 # set min and max ERDS values in plot
baseline = [-1, 0] # baseline interval (in s)
cmap = center_cmap(plt.cm.RdBu, vmin, vmax) # zero maps to white
kwargs = dict(n_permutations=100, step_down_p=0.05, seed=1,
buffer_size=None, out_type='mask') # for cluster test
# Run TF decomposition overall epochs
tfr = tfr_multitaper(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=False, average=False,
decim=2)
tfr.crop(tmin, tmax)
tfr.apply_baseline(baseline, mode="percent")
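# Note on the baseline mode: "percent" rescales each time-frequency bin to
# (P - mean(P_baseline)) / mean(P_baseline) (see the MNE documentation of rescale),
# so negative values correspond to ERD (power decrease) and positive values to ERS
# (power increase) relative to baseline power.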
for event in event_ids:
# select desired epochs for visualization
tfr_ev = tfr[event]
fig, axes = plt.subplots(1, 4, figsize=(12, 4),
gridspec_kw={"width_ratios": [10, 10, 10, 1]})
for ch, ax in enumerate(axes[:-1]): # for each channel
# positive clusters
_, c1, p1, _ = pcluster_test(tfr_ev.data[:, ch, ...], tail=1, **kwargs)
# negative clusters
_, c2, p2, _ = pcluster_test(tfr_ev.data[:, ch, ...], tail=-1,
**kwargs)
# note that we keep clusters with p <= 0.05 from the combined clusters
# of two independent tests; in this example, we do not correct for
# these two comparisons
c = np.stack(c1 + c2, axis=2) # combined clusters
p = np.concatenate((p1, p2)) # combined p-values
mask = c[..., p <= 0.05].any(axis=-1)
# plot TFR (ERDS map with masking)
tfr_ev.average().plot([ch], vmin=vmin, vmax=vmax, cmap=(cmap, False),
axes=ax, colorbar=False, show=False, mask=mask,
mask_style="mask")
ax.set_title(epochs.ch_names[ch], fontsize=10)
ax.axvline(0, linewidth=1, color="black", linestyle=":") # event
if ch != 0:
ax.set_ylabel("")
ax.set_yticklabels("")
fig.colorbar(axes[0].images[-1], cax=axes[-1])
fig.suptitle("ERDS ({})".format(event))
fig.show()
| bsd-3-clause |
ssorgatem/qiime | qiime/make_2d_plots.py | 9 | 23540 | #!/usr/bin/env python
# File created on 09 Feb 2010
# file make_2d_plots.py
__author__ = "Jesse Stombaugh and Micah Hamady"
__copyright__ = "Copyright 2011, The QIIME Project"
# remember to add yourself
__credits__ = ["Jesse Stombaugh", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
import re
from matplotlib import use
use('Agg', warn=False)
from matplotlib.pylab import *
from matplotlib.cbook import iterable, is_string_like
from matplotlib.patches import Ellipse
from matplotlib.font_manager import FontProperties
from string import strip
from numpy import array, asarray, ndarray
from time import strftime
from random import choice
from qiime.util import summarize_pcoas, isarray, qiime_system_call
from qiime.parse import group_by_field, group_by_fields, parse_coords
from qiime.colors import data_color_order, data_colors, \
get_group_colors, data_colors, iter_color_groups
from qiime.sort import natsort
from tempfile import mkdtemp
import os
import numpy
SCREE_TABLE_HTML = """<table cellpadding=0 cellspacing=0 border=1>
<tr><th align=center colspan=3 border=0>Scree plot</th></tr>
<tr>
<td class=normal align=center border=0>%s</td>
</tr>
</table>
<br><br>"""
TABLE_HTML = """<table cellpadding=0 cellspacing=0 border=1>
<tr><th align=center colspan=3 border=0>%s</th></tr>
<tr>
<td class=normal align=center border=0>%s</td>
<td class=normal align=center border=0>%s</td>
<td class=normal align=center border=0>%s</td>
</tr>
</table>
<br><br>"""
PAGE_HTML = """
<html>
<head>
<style type="text/css">
.normal { color: black; font-family:Arial,Verdana; font-size:12;
font-weight:normal;}
</style>
<script type="text/javascript" src="js/overlib.js"></script>
<title>%s</title>
</head>
<body>
<div id="overDiv" style="position:absolute; visibility:hidden; z-index:1000;">\
</div>
%s
</body>
</html>
"""
IMG_SRC = """<img src="%s" border=0 />"""
DOWNLOAD_LINK = """<a href="%s" >%s</a>"""
AREA_SRC = """<AREA shape="circle" coords="%d,%d,5" href="#%s" \
onmouseover="return overlib('%s');" onmouseout="return nd();">\n"""
IMG_MAP_SRC = """<img src="%s" border="0" ismap usemap="#points%s" width="%d" \
height="%d" />\n"""
MAP_SRC = """
<MAP name="points%s">
%s
</MAP>
"""
shape = [
's', # : square
'o', # : circle
'^', # : triangle up
'>', # : triangle right
'v', # : triangle down
'<', # : triangle left
'd', # : diamond
'p', # : pentagon
'h', # : hexagon
]
'''
data_colors={'blue':'#0000FF','lime':'#00FF00','red':'#FF0000', \
'aqua':'#00FFFF','fuchsia':'#FF00FF','yellow':'#FFFF00', \
'green':'#008000','maroon':'#800000','teal':'#008080', \
'purple':'#800080','olive':'#808000', \
'silver':'#C0C0C0','gray':'#808080'}
'''
default_colors = ['blue', 'lime', 'red', 'aqua', 'fuchsia', 'yellow', 'green',
'maroon', 'teal', 'purple', 'olive', 'silver', 'gray']
# This function used to live in make_3d_plots.py but in the Boulder sk-bio
# code sprint it got moved here to remove the 3D files.
def get_coord(coord_fname, method="IQR"):
"""Opens and returns coords location matrix and metadata.
Also two spread matrices (+/-) if passed a dir of coord files.
If only a single coord file, spread matrices are returned as None.
"""
if not os.path.isdir(coord_fname):
try:
coord_f = open(coord_fname, 'U')
except (TypeError, IOError):
raise MissingFileError('Coord file required for this analysis')
coord_header, coords, eigvals, pct_var = parse_coords(coord_f)
return [coord_header, coords, eigvals, pct_var, None, None]
else:
master_pcoa, support_pcoas = load_pcoa_files(coord_fname)
# get Summary statistics
coords, coords_low, coords_high, eigval_average, coord_header = \
summarize_pcoas(master_pcoa, support_pcoas, method=method)
pct_var = master_pcoa[3] # should be getting this from an average
# make_3d_plots expects coord_header to be a python list
coord_header = list(master_pcoa[0])
return (
[coord_header,
coords,
eigval_average,
pct_var,
coords_low,
coords_high]
)
def make_line_plot(
dir_path, data_file_link, background_color, label_color, xy_coords,
props, x_len=8, y_len=4, draw_axes=False, generate_eps=True):
""" Write a line plot
xy_coords: a dict of form {series_label:([x data], [y data], point_marker, color)}
(code adapted from Micah Hamady's code)
"""
rc('font', size='8')
rc('axes', linewidth=.5, edgecolor=label_color)
rc('axes', labelsize=8)
rc('xtick', labelsize=8)
rc('ytick', labelsize=8)
fig = figure(figsize=(x_len, y_len))
mtitle = props.get("title", "Groups")
x_label = props.get("xlabel", "X")
y_label = props.get("ylabel", "Y")
title('%s' % mtitle, fontsize='10', color=label_color)
xlabel(x_label, fontsize='8', color=label_color)
ylabel(y_label, fontsize='8', color=label_color)
sorted_keys = sorted(xy_coords.keys())
labels = []
for s_label in sorted_keys:
s_data = xy_coords[s_label]
c = s_data[3]
m = s_data[2]
plot(s_data[0], s_data[1], c=c, marker=m, label=s_label,
linewidth=.1, ms=5, alpha=1.0)
fp = FontProperties()
fp.set_size('8')
legend(prop=fp, loc=0)
show()
img_name = 'scree_plot.png'
savefig(
os.path.join(dir_path,
img_name),
dpi=80,
facecolor=background_color)
# Create zipped eps files
eps_link = ""
if generate_eps:
eps_img_name = str('scree_plot.eps')
savefig(os.path.join(dir_path, eps_img_name), format='eps')
out, err, retcode = qiime_system_call(
"gzip -f " + os.path.join(dir_path, eps_img_name))
eps_link = DOWNLOAD_LINK % ((os.path.join(data_file_link, eps_img_name)
+ ".gz"), "Download Figure")
return os.path.join(data_file_link, img_name), eps_link
def draw_scree_graph(dir_path, data_file_link, background_color, label_color,
generate_eps, data):
"""Draw scree plot
(code adapted from Micah Hamady's code)
"""
dimensions = len(data['coord'][3])
props = {}
props["title"] = "PCoA Scree Plot (First %s dimensions)" % dimensions
props["ylabel"] = "Fraction of Variance"
props["xlabel"] = "Principal component"
xy_coords = {}
x_points = [x for x in range(dimensions)]
c_data = [float(x) / 100.0 for x in data['coord'][3]]
xy_coords['Variance'] = (x_points, c_data, 'o', 'r')
cum_var = [c_data[0]]
for ix in range(dimensions - 1):
cum_var.append(cum_var[ix] + c_data[ix + 1])
xy_coords['Cumulative variance'] = (x_points, cum_var, 's', 'b')
img_src, eps_link = make_line_plot(
dir_path, data_file_link, background_color, label_color,
xy_coords=xy_coords, props=props, x_len=4.5,
y_len=4.5, generate_eps=generate_eps)
return IMG_SRC % img_src, eps_link
def make_interactive_scatter(plot_label, dir_path, data_file_link,
background_color, label_color, sample_location,
alpha, xy_coords,
props, x_len=8, y_len=4, size=10,
draw_axes=False, generate_eps=True):
"""Write interactive plot
xy_coords: a dict of form {series_label:([x data], [y data], \
[xy point label],[color])}
"""
my_axis = None
rc('font', size='8')
rc('patch', linewidth=0)
rc('axes', linewidth=.5, edgecolor=label_color)
rc('axes', labelsize=8)
rc('xtick', labelsize=8, color=label_color)
rc('ytick', labelsize=8, color=label_color)
sc_plot = draw_scatterplot(props, xy_coords, x_len, y_len, size,
background_color, label_color, sample_location,
alpha)
mtitle = props.get("title", "Groups")
x_label = props.get("xlabel", "X")
y_label = props.get("ylabel", "Y")
title('%s' % mtitle, fontsize='10', color=label_color)
xlabel(x_label, fontsize='8', color=label_color)
ylabel(y_label, fontsize='8', color=label_color)
show()
if draw_axes:
axvline(linewidth=.5, x=0, color=label_color)
axhline(linewidth=.5, y=0, color=label_color)
if my_axis is not None:
axis(my_axis)
img_name = x_label[0:3] + '_vs_' + y_label[0:3] + '_plot.png'
savefig(os.path.join(dir_path, img_name),
dpi=80, facecolor=background_color)
# Create zipped eps files
eps_link = ""
if generate_eps:
eps_img_name = str(x_label[0:3] + 'vs' + y_label[0:3] + 'plot.eps')
savefig(os.path.join(dir_path, eps_img_name), format='eps')
out, err, retcode = qiime_system_call(
"gzip -f " + os.path.join(dir_path, eps_img_name))
eps_link = DOWNLOAD_LINK % ((os.path.join(data_file_link, eps_img_name)
+ ".gz"), "Download Figure")
all_cids, all_xcoords, all_ycoords = transform_xy_coords(
xy_coords, sc_plot)
xmap, img_height, img_width = generate_xmap(
x_len, y_len, all_cids, all_xcoords,
all_ycoords)
points_id = plot_label + x_label[2:3] + y_label[2:3]
return IMG_MAP_SRC % (os.path.join(data_file_link, img_name), points_id,
img_width, img_height), MAP_SRC % \
(points_id, ''.join(xmap)), eps_link
def generate_xmap(x_len, y_len, all_cids, all_xcoords, all_ycoords):
"""Generates the html interactive image map"""
# Determine figure height and width
img_height = x_len * 80
img_width = y_len * 80
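# The factor of 80 presumably mirrors the dpi=80 passed to savefig() elsewhere in
# this module (figsize inches * dpi = pixels), so the <AREA> coordinates of the
# image map line up with the rendered PNG.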
# Write html script which allows for mouseover of labels
xmap = []
for cid, x, y in zip(all_cids, all_xcoords, all_ycoords):
xmap.append(AREA_SRC % (x, img_height - y, cid, cid))
return xmap, img_height, img_width
def draw_scatterplot(props, xy_coords, x_len, y_len, size, background_color,
label_color, sample_location, alpha):
"""Create scatterplot figure"""
fig = figure(figsize=(x_len, y_len))
xPC = int(props['xlabel'][2:3])
yPC = int(props['ylabel'][2:3])
sorted_keys = xy_coords.keys()
scatters = {}
size_ct = shape_ct = 0
xPC = xPC - 1
yPC = yPC - 1
# Iterate through coords and add points to the scatterplot
for s_label in sorted_keys:
s_data = xy_coords[s_label]
if s_data[0] == []:
pass
else:
c = s_data[3]
m = s_data[4][0]
ax = fig.add_subplot(111, axisbg=background_color)
# set tick colors and width
for line in ax.yaxis.get_ticklines():
# line is a matplotlib.lines.Line2D instance
line.set_color(label_color)
line.set_markeredgewidth(1)
for line in ax.xaxis.get_ticklines():
# line is a matplotlib.lines.Line2D instance
line.set_color(label_color)
line.set_markeredgewidth(1)
if isarray(s_data[5][0]) and isarray(s_data[6][0]) and \
isarray(s_data[7][0]):
matrix_low = s_data[5][0]
matrix_high = s_data[6][0]
ellipse_ave = s_data[7][0]
ellipse_x = [ellipse_ave[sample_location[s_label], xPC]]
ellipse_y = [ellipse_ave[sample_location[s_label], yPC]]
width = [fabs(matrix_high[sample_location[s_label], xPC] -
matrix_low[sample_location[s_label], xPC])]
height = [fabs(matrix_high[sample_location[s_label], yPC] -
matrix_low[sample_location[s_label], yPC])]
sc_plot = scatter_ellipse(ax, ellipse_x,
ellipse_y, width, height, c=c, a=0.0,
alpha=alpha)
sc_plot.scatter(ellipse_x, ellipse_y, c=c, marker=m,
alpha=1.0)
else:
sc_plot = ax.scatter(s_data[0], s_data[1], c=c, marker=m,
alpha=1.0, s=size, linewidth=1, edgecolor=c)
size_ct += 1
shape_ct += 1
scatters[s_label] = sc_plot
return sc_plot
def transform_xy_coords(xy_coords, sc_plot):
"""Transform the coords from the scatterplot into coords that can be \
referenced in the html page"""
sorted_keys = xy_coords.keys()
all_cids = []
all_xcoords = []
all_ycoords = []
sc_plot.set_transform(sc_plot.axes.transData)
trans = sc_plot.get_transform()
for s_label in sorted_keys:
s_data = xy_coords[s_label]
if s_data[0] == []:
pass
else:
icoords = trans.transform(zip(s_data[0], s_data[1]))
xcoords, ycoords = zip(*icoords)
all_cids.extend(s_data[2])
all_xcoords.extend(xcoords)
all_ycoords.extend(ycoords)
return all_cids, all_xcoords, all_ycoords
def draw_pcoa_graph(plot_label, dir_path, data_file_link, coord_1, coord_2,
coord_1r, coord_2r, mat_ave, sample_location,
data, prefs, groups, colors, background_color, label_color,
data_colors, data_color_order,
generate_eps=True,
pct_variation_below_one=False):
"""Draw PCoA graphs"""
coords, pct_var = convert_coord_data_to_dict(data)
mapping = data['map']
if coord_1 not in coords:
raise ValueError("Principal coordinate: %s not available." % coord_1)
if coord_2 not in coords:
raise ValueError("Principal coordinate: %s not available." % coord_2)
# Handle matplotlib scale bug when all coords are 0.0
if not len([x for x in map(float, coords[coord_2]) if x != 0.0]):
for ix in range(len(coords[coord_2])):
coords[coord_2][ix] = '1e-255'
if not len([x for x in map(float, coords[coord_1]) if x != 0.0]):
for ix in range(len(coords[coord_1])):
coords[coord_1][ix] = '1e-255'
# Write figure labels
pct_exp1 = float(pct_var[coord_1])
pct_exp2 = float(pct_var[coord_2])
if float(pct_var['1']) < 1 and not pct_variation_below_one:
pct_exp1 *= 100
pct_exp2 *= 100
props = {}
props["title"] = "PCoA - PC%s vs PC%s" % (coord_1, coord_2)
props["ylabel"] = "PC%s - Percent variation explained %.2f%%" \
% (coord_2, pct_exp2)
props["xlabel"] = "PC%s - Percent variation explained %.2f%%" \
% (coord_1, pct_exp1)
labels = coords['pc vector number']
p1 = map(float, coords[coord_2])
p2 = map(float, coords[coord_1])
if isarray(coord_1r) and isarray(coord_2r) and isarray(mat_ave):
p1r = coord_2r
p2r = coord_1r
else:
p1r = None
p2r = None
mat_ave = None
if len(p1) != len(p2):
raise ValueError("Principal coordinate vectors unequal length.")
p1d = dict(zip(labels, p1))
p2d = dict(zip(labels, p2))
alpha = data['alpha']
xy_coords = extract_and_color_xy_coords(
p1d, p2d, p1r, p2r, mat_ave, colors,
data_colors, groups, coords)
img_src, img_map, eps_link = make_interactive_scatter(
plot_label, dir_path,
data_file_link, background_color, label_color,
sample_location, alpha,
xy_coords=xy_coords, props=props, x_len=4.5,
y_len=4.5, size=20, draw_axes=True,
generate_eps=generate_eps)
return img_src + img_map, eps_link
def extract_and_color_xy_coords(
p1d, p2d, p1dr, p2dr, mat_ave, colors, data_colors,
groups, coords):
"""Extract coords from appropriate columns and attach their \
corresponding colors based on the group"""
xy_coords = {}
shape_ct = 0
for group_name, ids in (groups.items()):
x = 0
color = data_colors[colors[group_name]].toHex()
m = shape[shape_ct % len(shape)]
shape_ct += 1
for id_ in (ids):
cur_labs = []
cur_x = []
cur_y = []
cur_color = []
cur_shape = []
cur_1r = []
cur_2r = []
new_mat_ave = []
if id_ in coords['pc vector number']:
cur_labs.append(id_ + ': ' + group_name)
cur_x.append(p2d[id_])
cur_y.append(p1d[id_])
cur_color.append(color)
cur_shape.append(m)
if isarray(p2dr) and isarray(p1dr) and isarray(mat_ave):
cur_1r.append(p1dr)
cur_2r.append(p2dr)
new_mat_ave.append(mat_ave)
else:
cur_1r = [None]
cur_2r = [None]
new_mat_ave = [None]
xy_coords["%s" % id_] = (cur_x, cur_y, cur_labs, cur_color,
cur_shape, cur_1r, cur_2r, new_mat_ave)
return xy_coords
def create_html_filename(coord_filename, name_ending):
"""Generate html filename using the given coord filename"""
outpath = coord_filename.split('/')[-1] + name_ending
return outpath
def convert_coord_data_to_dict(data):
"""Convert the coord data into a dictionary"""
coord_header = data['coord'][0]
coords = data['coord'][1]
pct_var = data['coord'][3]
coords_dict = {}
pct_var_dict = {}
coords_dict['pc vector number'] = coord_header
for x in range(len(coords)):
coords_dict[str(x + 1)] = coords[0:, x]
pct_var_dict[str(x + 1)] = pct_var[x]
return coords_dict, pct_var_dict
def write_html_file(out_table, outpath):
"""Write 2D plots into an html file"""
page_out = PAGE_HTML % (outpath, out_table)
out = open(outpath, "w+")
out.write(page_out)
out.close()
def generate_2d_plots(prefs, data, html_dir_path, data_dir_path, filename,
background_color, label_color, generate_scree,
pct_variation_below_one):
"""Generate interactive 2D scatterplots"""
coord_tups = [("1", "2"), ("3", "2"), ("1", "3")]
mapping = data['map']
out_table = ''
# Iterate through prefs and generate html files for each colorby option
# Sort by the column name first
sample_location = {}
groups_and_colors = iter_color_groups(mapping, prefs)
groups_and_colors = list(groups_and_colors)
for i in range(len(groups_and_colors)):
labelname = groups_and_colors[i][0]
groups = groups_and_colors[i][1]
colors = groups_and_colors[i][2]
data_colors = groups_and_colors[i][3]
data_color_order = groups_and_colors[i][4]
data_file_dir_path = mkdtemp(dir=data_dir_path)
new_link = os.path.split(data_file_dir_path)
data_file_link = os.path.join('.', os.path.split(new_link[-2])[-1],
new_link[-1])
new_col_name = labelname
img_data = {}
plot_label = labelname
if 'support_pcoas' in data:
matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
summarize_pcoas(data['coord'], data['support_pcoas'],
method=data['ellipsoid_method'])
data['coord'] = \
(m_names, matrix_average, data['coord'][2], data['coord'][3])
for i in range(len(m_names)):
sample_location[m_names[i]] = i
else:
matrix_average = None
matrix_low = None
matrix_high = None
eigval_average = None
m_names = None
iterator = 0
for coord_tup in coord_tups:
if isarray(matrix_low) and isarray(matrix_high) and \
isarray(matrix_average):
coord_1r = asarray(matrix_low)
coord_2r = asarray(matrix_high)
mat_ave = asarray(matrix_average)
else:
coord_1r = None
coord_2r = None
mat_ave = None
sample_location = None
coord_1, coord_2 = coord_tup
img_data[coord_tup] = draw_pcoa_graph(
plot_label, data_file_dir_path,
data_file_link, coord_1, coord_2,
coord_1r, coord_2r, mat_ave,
sample_location,
data, prefs, groups, colors,
background_color, label_color,
data_colors, data_color_order,
generate_eps=True,
pct_variation_below_one=pct_variation_below_one)
out_table += TABLE_HTML % (labelname,
"<br>".join(img_data[("1", "2")]),
"<br>".join(img_data[("3", "2")]),
"<br>".join(img_data[("1", "3")]))
if generate_scree:
data_file_dir_path = mkdtemp(dir=data_dir_path)
new_link = os.path.split(data_file_dir_path)
data_file_link = os.path.join(
'.',
os.path.split(new_link[-2])[-1],
new_link[-1])
img_src, download_link = draw_scree_graph(
data_file_dir_path, data_file_link, background_color,
label_color, generate_eps=True, data=data)
out_table += SCREE_TABLE_HTML % ("<br>".join((img_src, download_link)))
outfile = create_html_filename(filename, '.html')
outfile = os.path.join(html_dir_path, outfile)
write_html_file(out_table, outfile)
def scatter_ellipse(axis_ob, x, y, w, h, c='b', a=0.0, alpha=0.5):
"""
SCATTER_ELLIPSE(x, y, w=None, h=None, c='b', a=0.0)
Make a scatter plot of x versus y with ellipses surrounding the
center point. w and h represent the width
and height of the ellipse that surround each x,y coordinate.
They are arrays of the same length as x or y. c is
a color and can be a single color format string or an length(x) array
of intensities which will be mapped by the colormap jet. a is the
angle or rotation in degrees of each ellipse (anti-clockwise). It is
also an array of the same length as x or y or a single value to be
iterated over all points.
"""
if not axis_ob._hold:
axis_ob.cla()
if not iterable(a):
a = [a] * len(x)
if not iterable(alpha):
alpha = [alpha] * len(x)
if len(c) != len(x):
raise ValueError('c and x are not equal lengths')
if len(w) != len(x):
raise ValueError('w and x are not equal lengths')
if len(h) != len(x):
raise ValueError('h and x are not equal lengths')
if len(a) != len(x):
raise ValueError('a and x are not equal lengths')
# if len(alpha)!=len(x):
# raise ValueError, 'alpha and x are not equal lengths'
patches = []
for thisX, thisY, thisW, thisH, thisC, thisA, thisAl in \
zip(x, y, w, h, c, a, alpha):
ellip = Ellipse((thisX, thisY), width=thisW, height=thisH,
angle=thisA)
ellip.set_facecolor(thisC)
ellip.set_alpha(thisAl)
axis_ob.add_patch(ellip)
patches.append(ellip)
axis_ob.autoscale_view()
return axis_ob
| gpl-2.0 |
OSSHealth/ghdata | workers/release_worker/release_worker.py | 1 | 11703 | #SPDX-License-Identifier: MIT
import logging, os, sys, time, requests, json
from datetime import datetime
from multiprocessing import Process, Queue
from urllib.parse import urlparse
import pandas as pd
import sqlalchemy as s
from sqlalchemy import MetaData
from sqlalchemy.ext.automap import automap_base
from workers.worker_base import Worker
#TODO - fully edit to match releases
class ReleaseWorker(Worker):
"""
Worker that collects Repository Releases data from the Github API
and stores it in our database.
:param task: most recent task the broker added to the worker's queue
:param config: holds info like api keys, descriptions, and database connection strings
"""
def __init__(self, config={}):
worker_type = "release_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
models = ['releases']
# Define the tables needed to insert, update, or delete on
data_tables = ['releases']
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = 'Release Worker'
self.tool_version = '1.0.0'
self.data_source = 'GitHub API'
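# Rough sketch of the task format consumed below (hypothetical values; the broker
# normally constructs this dict). The only field read directly in this file is
# task['given']['github_url']:
# task = {'given': {'github_url': 'https://github.com/owner/repo'}}
# worker.releases_model(task, repo_id) # repo_id identifies the repo row in the DB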
def get_release_inf(self, repo_id, release, tag_only):
if not tag_only:
name = "" if release['author']['name'] is None else release['author']['name']
company = "" if release['author']['company'] is None else release['author']['company']
author = name + '_' + company
release_inf = {
'release_id': release['id'],
'repo_id': repo_id,
'release_name': release['name'],
'release_description': release['description'],
'release_author': author,
'release_created_at': release['createdAt'],
'release_published_at': release['publishedAt'],
'release_updated_at': release['updatedAt'],
'release_is_draft': release['isDraft'],
'release_is_prerelease': release['isPrerelease'],
'release_tag_name': release['tagName'],
'release_url': release['url'],
'tag_only': tag_only,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
else:
if 'tagger' in release['target']:
if 'name' in release['target']['tagger']:
name = release['target']['tagger']['name']
else:
name = ""
if 'email' in release['target']['tagger']:
email = '_' + release['target']['tagger']['email']
else:
email = ""
author = name + email
if 'date' in release['target']['tagger']:
date = release['target']['tagger']['date']
else:
date = ""
else:
author = ""
date = ""
release_inf = {
'release_id': release['id'],
'repo_id': repo_id,
'release_name': release['name'],
'release_author': author,
'release_tag_name': release['name'],
'tag_only': tag_only,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if date:
release_inf['release_created_at'] = date
return release_inf
def insert_release(self, task, repo_id, owner, release, tag_only = False):
# Get current table values
release_id_data_sql = s.sql.text("""
SELECT releases.release_id
FROM releases
WHERE repo_id = :repo_id
""")
self.logger.info(f'Getting release table values with the following PSQL query: \n{release_id_data_sql}\n')
release_id_data = pd.read_sql(release_id_data_sql, self.db, params={'repo_id': repo_id})
release_id_data = release_id_data.apply(lambda x: x.str.strip())
# Put all data together in format of the table
self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n')
release_inf = self.get_release_inf(repo_id, release, tag_only)
if release_id_data.size > 0 and release['id'] in release_id_data.values:
result = self.db.execute(self.releases_table.update().where(
self.releases_table.c.release_id==release['id']).values(release_inf))
self.logger.info(f"Release {release['id']} updated into releases table\n")
else:
result = self.db.execute(self.releases_table.insert().values(release_inf))
self.logger.info(f"Release {release['id']} inserted into releases table\n")
self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n")
self.results_counter += 1
self.logger.info(f"Inserted info for {owner}/{repo_id}/{release['name']}\n")
#Register this task as completed
self.register_task_completion(task, repo_id, "releases")
return
def get_query(self, owner, repo, tag_only):
if not tag_only:
query = """
{
repository(owner:"%s", name:"%s"){
id
releases(orderBy: {field: CREATED_AT, direction: ASC}, last: %d) {
edges {
node {
name
publishedAt
createdAt
description
id
isDraft
isPrerelease
tagName
url
updatedAt
author {
name
company
}
}
}
}
}
}
""" % (owner, repo, 10)
else:
query = """
{
repository(owner:"%s", name:"%s"){
id
refs(refPrefix: "refs/tags/", last: %d){
edges {
node {
name
id
target {
... on Tag {
tagger {
name
email
date
}
}
}
}
}
}
}
}
""" % (owner, repo, 10)
return query
def fetch_data(self, task, repo_id, tag_only = False):
github_url = task['given']['github_url']
self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n")
owner, repo = self.get_owner_repo(github_url)
url = 'https://api.github.com/graphql'
query = self.get_query(owner, repo, tag_only)
# Hit the graphql endpoint and retry 3 times in case of failure
num_attempts = 0
success = False
while num_attempts < 3:
self.logger.info("Hitting endpoint: {} ...\n".format(url))
r = requests.post(url, json={'query': query}, headers=self.headers)
self.update_gh_rate_limit(r)
try:
data = r.json()
except:
data = json.loads(json.dumps(r.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(r)
continue
if 'data' in data:
success = True
data = data['data']['repository']
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(r, temporarily_disable=True)
continue
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(r, bad_credentials=True)
continue
num_attempts += 1
if not success:
self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url))
return
data['owner'] = owner
return data
def releases_model(self, task, repo_id):
data = self.fetch_data(task, repo_id)
self.logger.info("repository value is: {}\n".format(data))
if 'releases' in data:
if 'edges' in data['releases'] and data['releases']['edges']:
for n in data['releases']['edges']:
if 'node' in n:
release = n['node']
self.insert_release(task, repo_id, data['owner'], release)
else:
self.logger.info("There's no release to insert. Current node is not available in releases: {}\n".format(n))
elif 'edges' in data['releases'] and not data['releases']['edges']:
self.logger.info("Searching for tags instead of releases...")
data = self.fetch_data(task, repo_id, True)
self.logger.info("refs value is: {}\n".format(data))
if 'refs' in data:
if 'edges' in data['refs']:
for n in data['refs']['edges']:
if 'node' in n:
release = n['node']
self.insert_release(task, repo_id, data['owner'], release, True)
else:
self.logger.info("There's no release to insert. Current node is not available in releases: {}\n".format(n))
else:
self.logger.info("There are no releases to insert for current repository: {}\n".format(data))
else:
self.logger.info("There are no refs in data: {}\n".format(data))
else:
self.logger.info("There are no releases to insert for current repository: {}\n".format(data))
else:
self.logger.info("Graphql response does not contain repository: {}\n".format(data))
| mit |
mueller-lab/PyFRAP | pyfrp/modules/pyfrp_sim_module.py | 2 | 23865 | #=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
"""Simulaton module for PyFRAP toolbox. Handles simulating FRAP experiments and all necessary functions to do so, such as
* Handling initial conditions.
* Mimicking bleaching effects.
* Experiment simulation.
"""
#===========================================================================================================================================================================
#Importing necessary modules
#===========================================================================================================================================================================
#PDE Toolbox
from fipy import *
#Numpy/Scipy
import numpy as np
import scipy.interpolate as interp
import scipy.ndimage.interpolation as ndi
#matplotlib
import matplotlib.pyplot as plt
#Misc
import time
import sys
#PyFRAP Modules
import pyfrp_plot_module
import pyfrp_integration_module
import pyfrp_misc_module
from pyfrp_term_module import *
import pyfrp_idx_module
import pyfrp_img_module
#===========================================================================================================================================================================
#Module Functions
#===========================================================================================================================================================================
def simulateReactDiff(simulation,signal=None,embCount=None,showProgress=True,debug=False):
r"""Simulates reaction diffusion equation goverining FRAP experiment.
Performs the following steps:
* Resets ``simVecs`` of all ROIs.
* If not generated yet, generates mesh (should never be the case!)
* Initializes PDE with Neumann boundary conditions resulting in the problem:
.. math::
\partial_t c = D \nabla^2 c - k_1 c + k_2,
where :math:`k_1` is the degradation rate and :math:`k_2` the production rate.
* Applies initial conditions defined in ``simulation.ICmode``.
* Simulates FRAP experiment.
Args:
simulation (pyfrp.subclasses.pyfrp_simulation.simulation): Simulation object.
Keyword Args:
signal (PyQt4.QtCore.pyqtSignal): PyQT signal to send progress to GUI.
embCount (int): Counter of counter process if multiple datasets are analyzed.
debug (bool): Print final debugging messages and show debugging plots.
showProgress (bool): Show simulation progress.
Returns:
pyfrp.subclasses.pyfrp_simulation.simulation: Updated simulation object.
"""
#Stepping and timescale
timeStepDuration = simulation.tvecSim[1]-simulation.tvecSim[0]
#Reset simulation vecs
for r in simulation.embryo.ROIs:
r.resetSimVec()
#Empty list to put simulation values in
vals=[]
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
print "Starting simulation"
print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
startTimeTotal=time.clock()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Mesh Generation
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
startTimeMesh=time.clock()
if simulation.mesh.mesh==None:
printWarning('No mesh has been generated yet!')
a=raw_input('Do you want to generate a mesh now?[Y/N]')
if a=='Y':
simulation.mesh.genMesh()
print "Mesh created in", time.clock()-startTimeMesh
else:
print 'Cannot run simulation without mesh, will abort.'
return simulation
timeMesh=time.clock()-startTimeMesh
print "Mesh created after", time.clock()-startTimeTotal
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Initialization of PDE
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Create solution variable
phi = CellVariable(name = "solution variable",mesh = simulation.mesh.mesh,value = 0.)
#Apply initial conditions
if simulation.ICmode==0:
phi = applyROIBasedICs(phi,simulation)
elif simulation.ICmode==1:
phi = applyRadialICs(phi,simulation,debug=debug)
elif simulation.ICmode==2:
phi=applyImperfectICs(phi,simulation,simulation.embryo.geometry.getCenter(),100.,simulation.embryo.sliceHeightPx)
elif simulation.ICmode==3:
phi=applyInterpolatedICs(phi,simulation,debug=False)
elif simulation.ICmode==4:
phi=applyIdealICs(phi,simulation,bleachedROI=simulation.bleachedROI,valOut=simulation.valOut)
#Remember ICs
simulation.IC=np.asarray(phi.value).copy()
#Defining Type of equation
eq = TransientTerm() == DiffusionTerm(coeff=simulation.D)+simulation.prod-simulation.degr*phi
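# In terms of the PDE in the module docstring, simulation.degr plays the role of the
# degradation rate k_1 and simulation.prod of the production rate k_2, i.e.
# d/dt phi = D*nabla^2(phi) + k_2 - k_1*phi with Neumann boundary conditions.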
#Defining BCs
#Note: BCs are Neumann boundaries by default
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Calculating initial concentrations
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for r in simulation.embryo.ROIs:
r.getSimConc(phi,append=True)
if simulation.saveSim:
vals.append(np.asarray(phi.value).copy())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Solving PDE
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Keeping track of time
startTimeSim=time.clock()
avgTime=0
stepTime=0
#Choose solver
if simulation.solver=="LU":
mySolver = LinearLUSolver(iterations=simulation.iterations, tolerance=simulation.tolerance)
elif simulation.solver=="PCG":
mySolver = LinearPCGSolver(tolerance=simulation.tolerance,iterations=simulation.iterations)
for step in range(simulation.stepsSim-1):
#Compute timestep duration
timeStepDuration=simulation.tvecSim[step+1]-simulation.tvecSim[step]
#Solve PDE in this Step
stepStart=time.clock()
eq.solve(var=phi,dt=timeStepDuration,solver=mySolver)
stepTime=stepTime+(time.clock()-stepStart)
#Compute concentration
avgStart=time.clock()
for r in simulation.embryo.ROIs:
r.getSimConc(phi,append=True)
avgTime=avgTime+(time.clock()-avgStart)
#Save simulation array if necessary
if simulation.saveSim:
vals.append(np.asarray(phi.value).copy())
#Print Progress
if showProgress:
currPerc=int(100*step/float(simulation.stepsSim))
if signal==None:
sys.stdout.write("\r%d%%" %currPerc)
sys.stdout.flush()
else:
if embCount==None:
signal.emit(currPerc)
else:
signal.emit(currPerc,embCount)
print "Step time: ", stepTime, " in %:", stepTime/(time.clock()-startTimeSim)*100
print "Avg time: ", avgTime, " in %:", avgTime/(time.clock()-startTimeSim)*100
print "Simulation done after", time.clock()-startTimeTotal
#Save to simulation object only
if simulation.saveSim:
simulation.vals=list(vals)
return simulation
def rerunReactDiff(simulation,signal=None,embCount=None,showProgress=True,debug=False):
"""Reruns simulation by extracting values from ``simulation.vals``.
Performs the following steps:
* Resets ``simVecs`` of all ROIs.
* Extracts values per ROI from ``simulation.vals``.
.. note:: Only works if simulation has been run before with ``saveSim`` enabled.
Args:
simulation (pyfrp.subclasses.pyfrp_simulation.simulation): Simulation object.
Keyword Args:
signal (PyQt4.QtCore.pyqtSignal): PyQT signal to send progress to GUI.
embCount (int): Counter of counter process if multiple datasets are analyzed.
debug (bool): Print final debugging messages and show debugging plots.
showProgress (bool): Show simulation progress.
Returns:
pyfrp.subclasses.pyfrp_simulation.simulation: Updated simulation object.
"""
#Check if can be rerun
if len(simulation.vals)==0:
printWarning("Values have not been saved for this simulation. Turn on saveSim to do that. Won't do anything for now.")
return simulation
#Reset simulation vecs
for r in simulation.embryo.ROIs:
r.resetSimVec()
#Loop through vals
for i,val in enumerate(simulation.vals):
for r in simulation.embryo.ROIs:
r.getSimConc(val,append=True)
#Print Progress
if showProgress:
currPerc=int(100*i/float(len(simulation.vals)))
if signal==None:
sys.stdout.write("\r%d%%" %currPerc)
sys.stdout.flush()
else:
if embCount==None:
signal.emit(currPerc)
else:
signal.emit(currPerc,embCount)
return simulation
def applyROIBasedICs(phi,simulation):
"""Applies ROI-based initial conditions.
First sets concentration on all mesh nodes equal to `simulation.embryo.analysis.concRim`.
Afterwards, mesh nodes get assigned the value of the first entry of ``dataVec`` of
the ROI covering them. Note: If a mesh node is covered by two ROIs, will assign the value
of the ROI that is last in embryo's ``ROIs`` list. See also
:py:func:`pyfrp.subclasses.pyfrp_simulation.setICMode`.
Args:
phi (fipy.CellVariable): PDE solution variable.
simulation (pyfrp.subclasses.pyfrp_simulation.simulation): Simulation object.
Returns:
fipy.CellVariable: Updated solution variable.
"""
phi.setValue(simulation.embryo.analysis.concRim)
for r in simulation.embryo.ROIs:
phi.value[r.meshIdx]=r.dataVec[0]
return phi
def applyIdealICs(phi,simulation,bleachedROI=None,valOut=None):
"""Applies ideal initial conditions.
That is, everything falling inside the bleached ROI in
x-y-direction will be set its initial dataVec value,
everything else will be set equal to valOut.
.. note:: The ``bleachedROI`` and valOut are often stored inside the simulation
object. If those two cannot be found, will try to find a ROI called *Bleached Square*
for the bleached ROI and set valOut to ``concRim``. If this again fails, will return
error.
Args:
phi (fipy.CellVariable): PDE solution variable.
simulation (pyfrp.subclasses.pyfrp_simulation.simulation): Simulation object.
Keyword Args:
bleachedROI (pyfrp.subclasses.pyfrp_ROI.ROI): Bleached ROI.
valOut (float): Value to be assigned outside of bleached ROI.
Returns:
fipy.CellVariable: Updated solution variable.
"""
if bleachedROI==None:
bleachedROI=simulation.embryo.getROIByName("Bleached Square")
if bleachedROI==None:
printError("No bleachedROI can be found in applyIdealICs.")
return phi
if valOut==None:
if simulation.embryo.analysis.concRim!=None:
valOut=simulation.embryo.analysis.concRim
else:
printError("No bleachedROI can be found in applyIdealICs.")
return phi
#Set all values in mesh to valOut
phi.setValue(valOut)
#Find indices of all nodes that lie inside bleachedROI in x-y-direction
x,y,z=simulation.mesh.getCellCenters()
ind=bleachedROI.checkXYInside(np.asarray(x),np.asarray(y))
#Set these nodes equal to first dataVec entry
phi.value[ind]=bleachedROI.dataVec[0]
return phi
def applyRadialICs(phi,simulation,radSteps=15,debug=False):
"""Applies radially averaged image data to solution variable as IC.
.. note:: Will use ``embryo.geometry.center`` as the center of the circles and the most
distant pixel from the center as the maximum radius.
Args:
phi (fipy.CellVariable): PDE solution variable.
simulation (pyfrp.subclasses.pyfrp_simulation.simulation): Simulation object.
radSteps (int): Number of radial levels.
debug (bool): Print debugging messages.
Returns:
fipy.CellVariable: Updated solution variable.
"""
#Adjust center so histogram works for 'quad'
if 'quad' in simulation.embryo.analysis.process.keys():
center=[0,0]
else:
center=simulation.embryo.geometry.getCenter()
#Compute radial histogram of IC image
maxR=pyfrp_img_module.dist(center,[simulation.ICimg.shape[0],simulation.ICimg.shape[0]])
bins,binsMid,histY,binY=pyfrp_img_module.radialImgHist(simulation.ICimg,nbins=radSteps,byMean=True,maxR=maxR)
#Set center to actual center of geometry
center=simulation.embryo.geometry.getCenter()
#Apply value of most outer bin to all nodes
phi.setValue(binY[-1])
#Loop through all bins and apply values from outside to inside
#Get cell center coordinates needed for the radial masks
x,y,z=simulation.mesh.getCellCenters()
binY=list(reversed(binY))
bins=list(reversed(bins))
for i in range(len(binY)):
phi.setValue(binY[i], where=(x-center[0])**2+(y-center[1])**2 < bins[i]**2)
if debug:
print "Applied concentration", binY[i], " to all nodes with radius <", bins[i]
return phi
def applyInterpolatedICs(phi,simulation,matchWithMaster=True,debug=False,fixNeg=True,fillICWithConcRim=True):
"""Interpolates initial conditions onto mesh.
Uses a bivariate spline interpolation (http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.interpolate.RectBivariateSpline.html)
to generate an interpolation function of the IC image. Then applies interpolated values to solution variable ``phi`` if mesh nodes are inside
image and masterROI. If not, will apply rim concentration.
.. note:: If no rim concentration has been calculated (for example through running the data analysis)
applyInterpolatedICs will try to compute ``concRim`` by itself. For this it will take the mean concentration outside of bleached square but inside
``masterROI``.
.. note:: The bleached square used here is not defined as a ``ROI`` object here, but rather through the properties
``embryo.sideLengthBleachedPx`` and ``embryo.offsetBleachedPx``. This might change in future versions.
Args:
phi (fipy.CellVariable): PDE solution variable.
simulation (pyfrp.subclasses.pyfrp_simulation.simulation): Simulation object.
Keyword Args:
matchWithMaster (bool): Match interpolation indices with ``masterROI`` indices.
debug (bool): Print debugging messages.
Returns:
fipy.CellVariable: Updated solution variable.
"""
#Get image resolution and center of geometry
res=simulation.ICimg.shape[0]
center=simulation.embryo.geometry.getCenter()
#Define x/y coordinates of interpolation
if 'quad' in simulation.embryo.analysis.process.keys():
#Shift everything by center to fit with the mesh
xInt = np.arange(center[0]+1, center[0]+res+1, 1)
yInt = np.arange(center[1]+1, center[1]+res+1, 1)
else:
xInt = np.arange(1, res+1, 1)
yInt = np.arange(1, res+1, 1)
#Getting cell centers
x,y,z=simulation.mesh.getCellCenters()
#Finding outer rim concentration
if simulation.embryo.analysis.concRim==None:
printWarning('concRim is not analyzed yet. Will use concentration outside of bleached region as approximation')
#Grab offset and sidelength
offset=simulation.embryo.offsetBleachedPx
sidelength=simulation.embryo.sideLengthBleachedPx
#Get indices outside of bleached square but inside masterROI
indXSqu,indYSqu=pyfrp_idx_module.getSquareIdxImg(simulation.embryo.offsetBleachedPx,simulation.embryo.sideLengthBleachedPx,simulation.embryo.dataResPx)
masterROI=simulation.embryo.getMasterROI()
indX=pyfrp_misc_module.complValsSimple(masterROI.imgIdxX,indXSqu)
indY=pyfrp_misc_module.complValsSimple(masterROI.imgIdxY,indYSqu)
if 'quad' in simulation.embryo.analysis.process.keys():
img=pyfrp_img_module.unflipQuad(np.flipud(simulation.ICimg))
else:
img=simulation.ICimg
concRim=pyfrp_img_module.meanConc(img[indX,indY])
print 'Approximate concRim = ', concRim
else:
concRim=simulation.embryo.analysis.concRim
if fillICWithConcRim:
masterROI=simulation.embryo.getMasterROI()
ICimg=concRim*np.ones(simulation.ICimg.shape)
ICimg[masterROI.imgIdxX,masterROI.imgIdxY]=simulation.ICimg[masterROI.imgIdxX,masterROI.imgIdxY]
else:
ICimg=simulation.ICimg.copy()
#Generate interpolation function
f=interp.RectBivariateSpline(xInt, yInt, ICimg.T, bbox=[None, None, None, None], kx=3, ky=3, s=0)
#Set all values of solution variable to concRim
phi.setValue(concRim)
#Get Offset of image and check which nodes are inside image
if 'quad' in simulation.embryo.analysis.process.keys():
offset=[simulation.embryo.dataResPx/2,simulation.embryo.dataResPx/2]
ins=pyfrp_idx_module.checkInsideImg(x,y,simulation.embryo.dataResPx/2,offset=offset)
else:
offset=[0,0]
ins=pyfrp_idx_module.checkInsideImg(x,y,simulation.embryo.dataResPx,offset=offset)
#Convert into indices
ind=np.arange(len(x))
ind=ind[np.where(ins)[0]]
"""NOTE: I think we need to match here indices inside image with the one of master ROI, so we don't apply
values outside of masterROI (generally just background) to nodes that lie INSIDE image, but OUTSIDE of masterROI.
"""
if matchWithMaster:
masterROI=simulation.embryo.getMasterROI()
xnew=np.asarray(x)[ind]
ynew=np.asarray(y)[ind]
ins=masterROI.checkXYInside(xnew,ynew)
ind=np.asarray(ind)
ind=ind[np.where(ins)[0]]
ind=list(ind)
#Apply interpolation
try:
phi.value[ind]=f.ev(x[ind],y[ind])
except IndexError:
if debug:
printNote("Changed index array to nparray b/c of IndexError.")
ind=np.array(ind)
phi.value[ind]=f.ev(x[ind],y[ind])
#Fix negative values if selected
if fixNeg:
phi=fixNegValues(phi)
return phi
def fixNegValues(phi,minVal=None):
"""Fixes negative values in solution variable.
Interpolation sometimes returns negative values if gradients are really steep. Will
apply ``minVal`` to such nodes.
If ``minVal==None``, will take smallest non-negative value of solution value.
"""
if minVal==None:
minVal=min(phi.value[np.where(phi.value>=0)[0]])
phi.value[np.where(phi.value<0)[0]]=minVal
return phi
def sigmoidBleachingFct(x,y,z,center,rJump,sliceHeight,maxVal=1.,maxMinValPerc=0.25,minMinValPerc=0.25,rate=0.1):
r"""Generates sigmoid scaling function for imperfect bleaching at
coordinates x/y/z.
The idea behind the sigmoid function is:
* Through scattering and other effects, the bleached window becomes blurry in larger depths, resulting
in a radial sigmoid scaling function around ``center``.
* Similarly, bleaching intensity increases with depth. Thus, a linear term controls the values close
to ``center`` of the sigmoid function. Bleaching closer to the laser than the imaged height will
be rendered stronger, while bleaching effects below will be decreased by *bumping up* the
bleached window. However, only until some threshold is reached.
The sigmoid function is given by:
.. math:: s(r,z) = v_{\mathrm{min}}(z)+(v_{\mathrm{max}}-v_{\mathrm{min}}(z))\frac{1}{1+\exp(-\rho(r-r_\mathrm{Jump}))},
where :math:`\rho` is the sigmoid slope given by ``rate``, :math:`r_\mathrm{Jump}` is the radius from ``center``
at which sigmoid function has its **jump**, given by ``rJump`` and :math:`r` the radius of coordinate ``[x,y]`` from
``center``.
:math:`v_{\mathrm{min}}(z)` is a linear function describing how strong the bleaching is dependent on the
depth :math:`z` given by
.. math:: v_{\mathrm{min}}(z) = \frac{v_{\mathrm{max}} - v_{\mathrm{max. bleach}}}{h_s} z + v_{\mathrm{max. bleach}},
where :math:`v_{\mathrm{max}}` is the value of the sigmoid function far from ``center``, :math:`v_{\mathrm{max. bleach}}`
is the strongest expected bleaching value (mostly at :math:`z=0`) and :math:`h_s` is the height of the imaging slice, given
by ``sliceHeight``.
The maximum rate of bleaching :math:`v_{\mathrm{max. bleach}}` is computed by:
.. math:: v_{\mathrm{max. bleach}} = (1-p_{\mathrm{max. bleach}})v_{\mathrm{max}},
where :math:`p_{\mathrm{max. bleach}}` is the percentage of maximum expected bleaching compared to the values in the imaging
height, given by ``maxMinValPerc``. That is, by how much has the laser power already decreased on its way from entry point of
the sample to the imaging height.
For sample depths deeper than the imaging height, bleaching is expected to decrease in intensity, thus the bleached area
is getting **bumped up**. To avoid bumping the bleached area by too much, eventually even resulting in the bleached
area having a higher concentration than the area outside, the sigmoid function has a cut-off: If values of :math:`s(r,z)` pass
.. math:: v_{\mathrm{min. bleach}} = (1+p_{\mathrm{min. bleach}})v_{\mathrm{max}},
where :math:`p_{\mathrm{min. bleach}}` is the percentage of bleaching to cut-off, then we set
.. math:: s(r,z) = v_{\mathrm{min. bleach}},
ultimately resulting in a scaling function given by
.. math:: s(r,z) = \left\{\begin{array}{cc}
v_{\mathrm{min}}(z)+(v_{\mathrm{max}}-v_{\mathrm{min}}(z))\frac{1}{1+\exp(-\rho(r-r_\mathrm{Jump}))} & \mbox{ if } s(r,z) <= v_{\mathrm{min. bleach}} , \\
v_{\mathrm{min. bleach}} & \mbox{ else }
\end{array}
\right.
.. image:: ../imgs/pyfrp_sim_module/sigmoidFct.png
Args:
x (numpy.ndarray): x-coordinates.
y (numpy.ndarray): y-coordinates.
z (numpy.ndarray): z-coordinates.
center (list): Center of bleaching.
rJump (float): Radius from center where sigmoid jump is expected.
sliceHeight (float): Height at which dataset was recorded.
Keyword Args:
maxVal (float): Value of sigmoid function outside of bleached region.
maxMinValPerc (float): Percentage of maximum bleaching intensity.
minMinValPerc (float): Percentage of minimum bleaching intensity.
rate (float): Rate at which sigmoid increases.
Returns:
numpy.ndarray:
"""
#Calculate linear equation describing how strong bleaching effect decreases as z increases
maxMinVal=(1-maxMinValPerc)*maxVal
m=(maxVal-maxMinVal)/sliceHeight
b=maxMinVal
minVal=m*z+b
#Compute distance from center for each point
r=np.sqrt((x-center[0])**2+(y-center[1])**2)
#Compute sigmoid function
sigm=minVal+(maxVal-minVal)/(1+np.exp(-rate*(r-rJump)))
#Compute cut-off, so we do not amplify the bleached region
minMinVal=(1+minMinValPerc)*maxVal
sigm[np.where(sigm>minMinVal)]=minMinVal
return sigm,r
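# Minimal usage sketch (illustrative parameter values, not taken from the original
# code); this mirrors what applyImperfectICs below does:
# x,y,z = simulation.mesh.getCellCenters()
# sigm,r = sigmoidBleachingFct(np.asarray(x),np.asarray(y),np.asarray(z),center=[256.,256.],rJump=50.,sliceHeight=30.)
# phi.value = phi.value * sigm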
def applyImperfectICs(phi,simulation,center,rJump,sliceHeight,maxVal=1.,maxMinValPerc=0.25,minMinValPerc=None,rate=0.1,matchWithMaster=True,debug=False):
"""Mimic imperfect bleaching through cone approximation, return phi.
.. warning:: Not working in current version. Will be integrated in further versions again.
"""
phi = applyInterpolatedICs(phi,simulation,matchWithMaster=matchWithMaster,debug=debug)
#Get cell coordinates
x,y,z=simulation.mesh.getCellCenters()
x=np.asarray(x)
y=np.asarray(y)
z=np.asarray(z)
#Compute by how much
if minMinValPerc==None:
r=np.sqrt((x-center[0])**2+(y-center[1])**2)
inVal=np.mean(phi.value[np.where(r<rJump)])
outVal=np.mean(phi.value[np.where(r>=rJump)])
minMinValPerc=inVal/outVal
#Compute sigmoid function
sigm,r = sigmoidBleachingFct(x,y,z,center,rJump,sliceHeight,maxVal=1.,maxMinValPerc=maxMinValPerc,minMinValPerc=minMinValPerc,rate=rate)
#Multiplicate solution variable with sigmoid function
phi.value = phi.value * sigm
return phi
| gpl-3.0 |
jereze/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
navigator8972/ensemble_ioc | PyMDP.py | 1 | 20197 | """
A simple module for solving model-based MDP problem
Discrete state/action space, known transition matrix...
A simplified python version similar to the one in Drake toolkit
(https://github.com/RobotLocomotion/drake)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
import PyMDP_Utils as utils
class DynamicalSystem:
"""
A class to define a dynamical system: might be discrete time or continous time...
"""
def __init__(self, is_ct=True):
self.is_ct_ = is_ct
# self.sysfunc_ = sysfunc #a system function accepts state and control and
# #return derivative/next state, possibly with higher order stuff
return
def Dynamics(self, x, u, t=None, parms=None):
print 'Calling base class dynamics...'
return
def IsGoalState(self, x):
print 'Base class function to decide if this is the goal state'
return False
def RandomSelectFeasibleState(self, x):
print 'Base class function to select a random feasible state'
return False
class MarkovDecisionProcess:
def __init__(self, S=None, A=None, T=None, C=None, gamma=1.0):
self.S_ = S # states: if multiple dimension, then S[:, i] is state i
self.A_ = A # actions: if multiple dimension, then A[:, i] is action i
self.T_ = T # transition: T[k][i, j] = P(s_{n+1}=s[:, j] | s_{n}=s[:, i], a_{n}=[:, k])
self.C_ = C # cost: cost C(s(:, i), a(:, k)) being at state i and taking action k
self.gamma_ = gamma #discounted rate
self.sub2ind_ = None # for mapping multiple dimensional discrete state to flattened state space
return
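    #Shape conventions, with illustrative sizes that are not from the original code:
    #for an MDP with 3 states and 2 actions, S_ has 3 columns, A_ has 2 columns, T_ is a
    #list of two 3x3 matrices whose rows sum to one, C_ is a 3x2 cost array, and the value
    #function returned by ValueIteration below is a length-3 vector.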
def MakeSub2Ind(self, xbins):
"""
function to generate a fast mapping from the vector of indices of dimensions to the flattened state space
"""
nskip = np.concatenate([[1], np.cumprod([len(xbins[i]) for i in range(len(xbins)-1)])])
self.sub2ind_ = lambda subinds: np.sum(nskip*np.array(subinds))
return self.sub2ind_
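        #Worked example (illustrative numbers, not from the original code): with two state
        #dimensions whose bins have lengths 5 and 7, nskip = [1, 5], so the per-dimension
        #subindices [2, 3] are flattened to the index 2*1 + 3*5 = 17.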
def MakeXDigitize(self, xbins):
"""
        function to generate a mapping from a continuous x to its digitized state according to the bins
"""
#the first one will not be used
effective_xbins = [xbin[1:] for xbin in xbins]
self.xdigitize_ = lambda x: [np.digitize([dim], bin)[0] for dim, bin in zip(x, effective_xbins)]
self.xdigitize_dim_ = lambda x, dim_idx: np.digitize([x[dim_idx]], effective_xbins[dim_idx])[0]
return self.xdigitize_, self.xdigitize_dim_
def DiscretizeSystem(self, sys, costfunc, xbins, ubins, options=dict()):
#check system
is_ct = sys.is_ct_
#check dimension
if not isinstance(xbins, list):
#convert to multi-dimension case
self.xbins_ = [xbins]
else:
self.xbins_ = xbins
if not isinstance(ubins, list):
self.ubins_ = [ubins]
else:
self.ubins_ = ubins
self.state_dim_ = len(self.xbins_)
self.ctrl_dim = len(self.ubins_)
if 'dt' not in options:
self.dt_ = 1.0
else:
self.dt_ = options['dt']
if 'wrap_flag' not in options:
self.wrap_flag_ = False * np.ones(self.state_dim_)
else:
self.wrap_flag_ = options['wrap_flag']
wrap_idx = np.where(self.wrap_flag_==True)[0]
xmin = np.array([bin[0] for bin in self.xbins_])
xmax = np.array([bin[-1] for bin in self.xbins_])
#construct grids
#state
Sgrid = np.meshgrid(*self.xbins_)
#for each dim, need to reshape to a long 1-d array
self.S_ = np.array([np.reshape(dim, (1, -1))[0] for dim in Sgrid])
#action
Agrid = np.meshgrid(*self.ubins_)
self.A_ = np.array([np.reshape(dim, (1, -1))[0] for dim in Agrid])
self.num_state_ = self.S_.shape[1]
self.num_action_ = self.A_.shape[1]
#prepare the transition matrix
# self.T_ = csr_matrix([np.zeros([self.num_state_, self.num_state_]) for dim_ix in range(self.num_action_)])
# self.T_ = [csr_matrix(np.zeros([self.num_state_, self.num_state_])) for dim_ix in range(self.num_action_)]
self.T_ = [np.zeros([self.num_state_, self.num_state_]) for dim_ix in range(self.num_action_)]
#prepare cost function
self.C_ = np.zeros([self.num_state_, self.num_action_])
#inline function to search index in reshaped state
#offset for sub2ind
sub2ind = self.MakeSub2Ind(self.xbins_)
xdigitize, xdigitize_dim = self.MakeXDigitize(self.xbins_)
print 'Constructing transition matrix...'
#vectorize this to increase the efficiency if possible...
for action_idx in range(self.num_action_):
for state_idx in range(self.num_state_):
if is_ct:
# the system must be an update equation
x_new = sys.Dynamics(self.S_[:, state_idx], self.A_[:, action_idx])
if np.isinf(costfunc(self.S_[:, state_idx], self.A_[:, action_idx], sys)) or np.isnan(costfunc(self.S_[:, state_idx], self.A_[:, action_idx], sys)):
print self.S_[:, state_idx], self.A_[:, action_idx]
else:
self.C_[state_idx, action_idx] = sys.dt_ * costfunc(self.S_[:, state_idx], self.A_[:, action_idx], sys)
if isinstance(x_new, list):
#contains both expected state and diagonal Gaussian noise...
x_new_mu = x_new[0]
x_new_sig = x_new[1]
if len(x_new_mu) != len(x_new_sig):
print 'Inconsistent length of state and noise vector...'
return
#wrap x_new if needed, this is useful for state variable like angular position
x_new_mu[wrap_idx] = np.mod(x_new_mu[wrap_idx] - xmin[wrap_idx],
xmax[wrap_idx] - xmin[wrap_idx]) + xmin[wrap_idx]
x_new_mu_idx = xdigitize(x_new_mu)
x_new_mu_digitized_state = self.S_[:, sub2ind(x_new_mu_idx)]
coeff_lst = []
involved_states = []
for dim_idx in range(len(x_new_mu)):
tmp_x_new_mu_idx = [idx for idx in x_new_mu_idx]
#for each dim, try to crawl the grid
#find lower bound, use the interval [-2*sigma, 2*sigma]
#how to wrap here? or just truncate the shape of gaussian?...
x_new_mu_tmp_min = np.array(x_new_mu)
x_new_mu_tmp_max = np.array(x_new_mu)
x_new_mu_tmp_min[dim_idx] += -2*x_new_sig[dim_idx]
x_new_mu_tmp_max[dim_idx] += 2*x_new_sig[dim_idx]
min_idx = xdigitize_dim(x_new_mu_tmp_min, dim_idx)
max_idx = xdigitize_dim(x_new_mu_tmp_max, dim_idx)
for step_idx in range(min_idx, max_idx+1):
tmp_x_new_mu_idx[dim_idx] = step_idx
#get the index of involved state
involved_state_idx = sub2ind(tmp_x_new_mu_idx)
involved_states.append(involved_state_idx)
coeff_lst.append(np.exp(-np.linalg.norm(((self.S_[:, involved_state_idx] - x_new_mu_digitized_state)/x_new_sig))**2))
coeff_lst = coeff_lst / np.sum(coeff_lst)
#assign transition probability for each state
for coeff, involved_state_idx in zip(coeff_lst.tolist(), involved_states):
self.T_[action_idx][state_idx, involved_state_idx] += coeff
else:
#only updated state is available, need to map it to the grid
#add Baryinterpolation?
#wrap x_new if needed, this is useful for state variable like angular position
x_new[wrap_idx] = np.mod(x_new[wrap_idx] - xmin[wrap_idx],
xmax[wrap_idx] - xmin[wrap_idx]) + xmin[wrap_idx]
#barycentricinterpolation...
indices, coeffs = utils.BarycentricInterpolation(self.xbins_, np.array([x_new]))
for i in range(len(indices[0])):
self.T_[action_idx][state_idx, int(indices[0, i])] = coeffs[0, i]
else:
#discrete state dynamical system...
#for discrete state dynamics, take the direct returned states and associated probability
x_new_lst = sys.Dynamics(self.S_[:, state_idx], self.A_[:, action_idx])
self.C_[state_idx, action_idx] = costfunc(self.S_[:, state_idx], self.A_[:, action_idx], sys)
for x_new in x_new_lst:
#get index of x_new
x_new_idx = xdigitize(x_new[0])
state_new_idx = sub2ind(x_new_idx)
self.T_[action_idx][state_idx, state_new_idx] = x_new[1]
#check the T matrix
# for action_idx in range(self.num_action_):
# if np.isinf(self.T_[action_idx]).any():
# print action_idx
# print np.isinf(self.T_[action_idx])
return
def ValueIteration(self, converged=.01, drawFunc=None, detail=False):
J = np.zeros(self.num_state_)
err = np.inf
n_itrs = 0
#prepare detailed result if necessary...
if detail:
res = dict()
hist = []
#vertical stack
Tstack = np.vstack(self.T_)
print 'Start Value Iteration...'
ax = None
while err > converged:
Jold = np.array(J)
#<hyin/Apr-14th-2015> note the reshape sequence of dot result
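            #Vectorized Bellman backup: Tstack has shape (num_action*num_state, num_state), so
            #Tstack.dot(J) stacks the expected next-state values of all actions; reshaping to
            #(num_action, num_state) and transposing gives entry (s, a) = sum_s' T[a][s, s']*J(s'),
            #so the line below computes J(s) = min_a [ C(s, a) + gamma * sum_s' T[a][s, s']*J(s') ].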
J = np.amin(self.C_ + self.gamma_*np.reshape(Tstack.dot(J), (self.num_action_, self.num_state_)).transpose(), axis=1)
# print 'iterating...'
# if np.isinf(J).any() or np.isinf(Tstack).any():
# print np.isinf(J), np.isinf(Tstack)
err = np.amax(np.abs(Jold-J))
if detail:
#record current itr, J, Q, err
tmp_rec = dict()
curr_Q = self.ActionValueFuncFromValueFunc(J)
tmp_rec['value_func'] = J
tmp_rec['action_value_func'] = curr_Q
tmp_rec['error'] = err
hist.append(tmp_rec)
print 'Iteration:', n_itrs, 'Error:', err
n_itrs+=1
if drawFunc is not None:
ax = drawFunc(self, J, ax)
if detail:
res['value_opt'] = hist[-1]['value_func']
res['action_value_opt'] = hist[-1]['action_value_func']
res['history'] = hist
return res
else:
return J
def ActionValueFuncFromValueFunc(self, J_opt):
"""
Get Q action value function from optimal value function
"""
def ActionValueValueIteration(T, C, J):
Q = T.dot(C + self.gamma_*J)
return Q
Q_opt = [ActionValueValueIteration(self.T_[action_idx], self.C_[:, action_idx], J_opt) for action_idx in range(self.num_action_)]
return Q_opt
def ChooseOptimalActionFromQFunc(self, Q, state_idx):
#helper function to choose optimal action from Q function
#enumerate q values for all possible actions
q = [Q[i][state_idx] for i in range(self.num_action_)]
Q_min = min(q)
count = q.count(Q_min)
if count > 1:
best = [i for i in range(self.num_action_) if q[i]==Q_min]
action_idx = np.random.choice(best)
else:
action_idx = q.index(Q_min)
return action_idx
def QLearningSarsa(self, sys, epsilon=0.2, alpha=0.05, max_itrs=5000, drawFunc=None, detail=False):
"""
learn Q from a dynamically learned policy
alpha - learning rate
max_itrs - number of steps to explore
only support discrete state dynamical system
"""
n_time_reach_goal = 0
n_choose_optimal_action = 0
n_steps_to_reach_goal = 0
Q = [np.ones(self.num_state_)*0 for i in range(self.num_action_)]
err = np.inf
n_itrs = 0
#prepare detailed result if necessary...
if detail:
res = dict()
hist = []
sub2ind = self.MakeSub2Ind(self.xbins_)
xdigitize, xdigitize_dim = self.MakeXDigitize(self.xbins_)
#generate a random initial state
x = sys.RandomSelectFeasibleState()
x_idx = xdigitize(x)
state_idx = sub2ind(x_idx)
#generate an action according to the epsilon greedy policy
explore_dice = np.random.random_sample()
if explore_dice < epsilon:
#choose a random action
action_idx = np.random.randint(low=0, high=self.num_action_)
else:
#greedy under current Q function
action_idx = self.ChooseOptimalActionFromQFunc(Q, state_idx)
n_choose_optimal_action += 1
while n_itrs < max_itrs:
#follow dynamics to get x_new, note the discrete dynamics returns
#an array of new states and their associated probability
x_new_lst = sys.Dynamics(self.S_[:, state_idx], self.A_[:, action_idx])
c = self.C_[state_idx, action_idx]
# print 'c:', c
x_new_prob = [x_new[1] for x_new in x_new_lst]
x_new_dice = np.argmax(np.random.multinomial(1, x_new_prob, size=1))
x_new = x_new_lst[x_new_dice][0]
x_new_idx = xdigitize(x_new)
state_new_idx = sub2ind(x_new_idx)
explore_dice_new = np.random.random_sample()
if explore_dice_new < epsilon:
#choose a random action
action_new_idx = np.random.randint(low=0, high=self.num_action_)
else:
#greedy under current Q function
action_new_idx = self.ChooseOptimalActionFromQFunc(Q, state_new_idx)
# print 'Choose current optimal action!', action_new_idx
n_choose_optimal_action += 1
#update curr Q value for current state index and action index
if Q[action_idx][state_idx] == np.inf:
Q[action_idx][state_idx] = c
else:
Q[action_idx][state_idx] += alpha*(c + self.gamma_*Q[action_new_idx][state_new_idx] - Q[action_idx][state_idx])
#check if new state is a terminal one...
if sys.IsGoalState(x_new):
# print 'Used ', n_steps_to_reach_goal, ' to reach the goal.'
# raw_input()
n_steps_to_reach_goal=0
n_time_reach_goal += 1
# raw_input()
#zero costs for x_new as it is the goal state
for action_idx in range(self.num_action_):
Q[action_idx][state_new_idx] = 0.0
#a new random state
x = sys.RandomSelectFeasibleState()
x_idx = xdigitize(x)
state_idx = sub2ind(x_idx)
explore_dice = np.random.random_sample()
if explore_dice < epsilon:
#choose a random action
action_idx = np.random.randint(low=0, high=self.num_action_)
else:
#greedy under current Q function
action_idx = self.ChooseOptimalActionFromQFunc(Q, state_idx)
# print 'Choose current optimal action!', action_idx
n_choose_optimal_action += 1
else:
state_idx = state_new_idx
action_idx = action_new_idx
if detail:
tmp_rec = dict()
tmp_rec['action_value_func'] = np.array(Q, copy=True)
hist.append(tmp_rec)
print 'Iteration:', n_itrs
n_itrs+=1
n_steps_to_reach_goal+=1
print 'Times of reaching the goal:', n_time_reach_goal
print 'Times of choosing optimal action', n_choose_optimal_action
if detail:
res['action_value_opt'] = hist[-1]['action_value_func']
res['history'] = hist
return res
else:
return Q
return
def QLearningEpsilonGreedy(self, sys, epsilon=0.2, alpha=0.05, max_itrs=5000, drawFunc=None, detail=False):
"""
learn Q given an initial guess and epsilon greedy policy
epsilon - probability to deviate from the greedy policy to explore
alpha - learning rate
max_itrs - number of steps to explore
only support discrete state dynamical system
"""
Q = [np.ones(self.num_state_)*0 for i in range(self.num_action_)]
err = np.inf
n_itrs = 0
#prepare detailed result if necessary...
if detail:
res = dict()
hist = []
sub2ind = self.MakeSub2Ind(self.xbins_)
xdigitize, xdigitize_dim = self.MakeXDigitize(self.xbins_)
#generate a random initial state
x = sys.RandomSelectFeasibleState()
x_idx = xdigitize(x)
state_idx = sub2ind(x_idx)
while n_itrs < max_itrs:
#generate an action according to the epsilon greedy policy
#throw a dice
explore_dice = np.random.random_sample()
if explore_dice < epsilon:
#choose a random action
action_idx = np.random.randint(low=0, high=self.num_action_)
else:
#greedy under current Q function
action_idx = self.ChooseOptimalActionFromQFunc(Q, state_idx)
#follow dynamics to get x_new, note the discrete dynamics returns
#an array of new states and their associated probability
x_new_lst = sys.Dynamics(self.S_[:, state_idx], self.A_[:, action_idx])
c = self.C_[state_idx, action_idx]
x_new_prob = [x_new[1] for x_new in x_new_lst]
print 'x_new_prob:', x_new_prob
x_new_dice = np.argmax(np.random.multinomial(1, x_new_prob, size=1))
x_new = x_new_lst[x_new_dice][0]
print 'x_new:', x_new
x_new_idx = xdigitize(x_new)
state_new_idx = sub2ind(x_new_idx)
#update curr Q value for current state index and action index
if Q[action_idx][state_idx] == np.inf:
Q[action_idx][state_idx] = c
else:
Q[action_idx][state_idx] += alpha*(c + self.gamma_*min([Q[i][state_new_idx]
for i in range(self.num_action_)]) - Q[action_idx][state_idx])
#check if new state is a terminal one...
if sys.IsGoalState(x_new):
#zero costs for x_new as it is the goal state
for action_idx in range(self.num_action_):
Q[action_idx][state_new_idx] = 0.0
#a new random state
x = sys.RandomSelectFeasibleState()
x_idx = xdigitize(x)
state_idx = sub2ind(x_idx)
else:
state_idx = state_new_idx
if detail:
tmp_rec = dict()
tmp_rec['action_value_func'] = np.array(Q, copy=True)
hist.append(tmp_rec)
print 'Iteration:', n_itrs
n_itrs+=1
if detail:
res['action_value_opt'] = hist[-1]['action_value_func']
res['history'] = hist
return res
else:
return Q
| bsd-2-clause |
miishke/dot_files | jupyter_notebook/ipython_notebook_config.py | 1 | 24745 | # Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.browser = u'/usr/bin/chromium-browser %s --incognito'
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = ''
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = ''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = ''
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = ''
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# The IPython profile to use.
# c.NotebookApp.profile = 'default'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# The notebook manager class to use.
# c.NotebookApp.notebook_manager_class = 'IPython.html.services.notebooks.filenbmanager.FileNotebookManager'
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = ''
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = '/home/uranus/Dropbox/PYTHON/IPnb'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# paths for Javascript extensions. By default, this is just
# IPYTHONDIR/nbextensions
# c.NotebookApp.nbextensions_path = []
#
# c.NotebookApp.file_to_run = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = ''
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
#
# c.IPKernelApp.transport = 'tcp'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# The IPython profile to use.
# c.IPKernelApp.profile = 'default'
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = ''
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
#
# c.IPKernelApp.parent_appname = ''
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = ''
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# A file to be run
# c.IPKernelApp.file_to_run = ''
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.separate_out2 = ''
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
#
# c.ZMQInteractiveShell.separate_in = '\n'
#
# c.ZMQInteractiveShell.xmode = 'Context'
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQInteractiveShell.history_length = 10000
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
#
# c.ZMQInteractiveShell.separate_out = ''
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.debug = False
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# Debug output in the Session
# c.Session.debug = False
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'uranus'
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The UUID identifying this session.
# c.Session.session = ''
# execution key, for extra authentication.
# c.Session.key = b''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = {'png'}
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = ''
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'savefig.dpi': 72, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'figure.edgecolor': (1, 1, 1, 0), 'figure.subplot.bottom': 0.125}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#
# c.MappingKernelManager.root_dir = '/home/uranus/Dropbox/PYTHON/IPnb'
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Glob patterns to hide in file and directory listings.
# c.NotebookManager.hide_globs = ['__pycache__']
#------------------------------------------------------------------------------
# FileNotebookManager configuration
#------------------------------------------------------------------------------
# FileNotebookManager will inherit config from: NotebookManager
# Glob patterns to hide in file and directory listings.
# c.FileNotebookManager.hide_globs = ['__pycache__']
#
# c.FileNotebookManager.notebook_dir = '/home/uranus/Dropbox/PYTHON/IPnb'
# The directory name in which to keep notebook checkpoints
#
# This is a path relative to the notebook's own directory.
#
# By default, it is .ipynb_checkpoints
# c.FileNotebookManager.checkpoint_dir = '.ipynb_checkpoints'
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.FileNotebookManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
| bsd-2-clause |
etkirsch/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
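# Worked example (consistent with the extract_patches_2d doctest further below): a 4x4
# image and 2x2 patches give n_h = n_w = 4 - 2 + 1 = 3, hence 9 possible patches when
# max_patches is None.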
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
neerajvashistha/pa-dude | lib/python2.7/site-packages/nltk/probability.py | 8 | 83570 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (additions)
# Trevor Cohn <[email protected]> (additions)
# Peter Ljunglöf <[email protected]> (additions)
# Liang Dong <[email protected]> (additions)
# Geoffrey Sampson <[email protected]> (additions)
# Ilia Kurenkov <[email protected]> (additions)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
from __future__ import print_function, unicode_literals
import math
import random
import warnings
import array
from operator import itemgetter
from collections import defaultdict
from functools import reduce
from nltk import compat
from nltk.compat import Counter
from nltk.internals import raise_unorderable_types
_NINF = float('-1e300')
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class FreqDist(Counter):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist[word.lower()] += 1
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
Counter.__init__(self, samples)
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return sum(self.values())
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
return self.r_Nr(bins)[r]
def r_Nr(self, bins=None):
"""
Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
        :rtype: dict
"""
_r_Nr = defaultdict(int)
for count in self.values():
_r_Nr[count] += 1
# Special case for Nr[0]:
_r_Nr[0] = bins - self.B() if bins is not None else 0
return _r_Nr
def _cumulative_frequencies(self, samples):
"""
Return the cumulative frequencies of the specified samples.
        The cumulative counts are accumulated (and yielded) in the order
        in which the samples are given.
:param samples: the samples whose frequencies should be returned.
:type samples: any
:rtype: list(float)
"""
cf = 0.0
for sample in samples:
cf += self[sample]
yield cf
# slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
# here, freq() does probs
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self.N() == 0:
return 0
return float(self[sample]) / self.N()
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, return None.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
return self.most_common(1)[0][0]
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
        :type cumulative: bool
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution (cumulative),
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted.
:param samples: The samples to plot (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print("%4s" % samples[i], end=' ')
print()
for i in range(len(samples)):
print("%4d" % freqs[i], end=' ')
print()
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def __le__(self, other):
if not isinstance(other, FreqDist):
raise_unorderable_types("<=", self, other)
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ge__ = lambda self, other: not self <= other or self == other
__lt__ = lambda self, other: self <= other and not self == other
__gt__ = lambda self, other: not self <= other
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return self.pformat()
def pprint(self, maxlen=10, stream=None):
"""
Print a string representation of this FreqDist to 'stream'
:param maxlen: The maximum number of items to print
:type maxlen: int
:param stream: The stream to print to. stdout by default
"""
print(self.pformat(maxlen=maxlen), file=stream)
def pformat(self, maxlen=10):
"""
Return a string representation of this FreqDist.
:param maxlen: The maximum number of items to display
:type maxlen: int
:rtype: string
"""
items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)]
if len(self) > maxlen:
items.append('...')
return 'FreqDist({{{0}}})'.format(', '.join(items))
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
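# Illustrative usage sketch (not part of the original NLTK source): exercising
# the FreqDist accessors defined above on a tiny hand-built sample; every
# asserted value follows directly from the input list.
def _example_freqdist_basics():
    fd = FreqDist(['a', 'a', 'a', 'b', 'b', 'c'])
    assert fd.N() == 6             # total recorded outcomes
    assert fd.B() == 3             # distinct samples (bins)
    assert fd.max() == 'a'         # most frequent sample
    assert fd.hapaxes() == ['c']   # samples occurring exactly once
    assert abs(fd.freq('b') - 2.0 / 6) < 1e-12
    return fd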
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
return (math.log(p, 2) if p != 0 else _NINF)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
p_init = p
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, p_init-p))
return random.choice(list(self.samples()))
@compat.python_2_unicode_compatible
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
return (self._prob if sample in self._sampleset else 0)
def max(self):
return self._samples[0]
def samples(self):
return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
@compat.python_2_unicode_compatible
class RandomProbDist(ProbDistI):
"""
    Generates a random probability distribution whereby each sample
    will be between 0 and 1 with equal probability (a uniform random
    distribution, also called a continuous uniform distribution).
"""
def __init__(self, samples):
if len(samples) == 0:
raise ValueError('A probability distribution must '+
'have at least one sample.')
self._probs = self.unirand(samples)
self._samples = list(self._probs.keys())
@classmethod
def unirand(cls, samples):
"""
The key function that creates a randomized initial distribution
that still sums to 1. Set as a dictionary of prob values so that
it can still be passed to MutableProbDist and called with identical
syntax to UniformProbDist
"""
randrow = [random.random() for i in range(len(samples))]
total = sum(randrow)
for i, x in enumerate(randrow):
randrow[i] = x/total
total = sum(randrow)
if total != 1:
            #this difference, if present, is so small that it can be
            #subtracted from any element without pushing a probability outside (0, 1)
randrow[-1] -= total - 1
return dict((s, randrow[i]) for i, s in enumerate(samples))
def prob(self, sample):
return self._probs.get(sample, 0)
def samples(self):
return self._samples
def __repr__(self):
return '<RandomUniformProbDist with %d samples>' %len(self._probs)
@compat.python_2_unicode_compatible
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probability to all values.
"""
self._prob_dict = (prob_dict.copy() if prob_dict is not None else {})
self._log = log
# Normalize the distribution, if requested.
if normalize:
if len(prob_dict) == 0:
raise ValueError('A DictionaryProbDist must have at least one sample ' +
'before it can be normalized.')
if log:
value_sum = sum_logs(list(self._prob_dict.values()))
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0)
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
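# Illustrative usage sketch (not part of the original NLTK source):
# DictionaryProbDist with normalize=True rescales the supplied weights so
# that they sum to one; unknown samples get probability zero.
def _example_dictionary_probdist():
    pd = DictionaryProbDist({'x': 2.0, 'y': 1.0, 'z': 1.0}, normalize=True)
    assert abs(pd.prob('x') - 0.5) < 1e-12
    assert pd.prob('missing') == 0
    return pd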
@compat.python_2_unicode_compatible
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is parameterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
    ``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
:param gamma: A real number used to parameterize the
estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None:
bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
*(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
*B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
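# Illustrative usage sketch (not part of the original NLTK source): the
# Lidstone family on one tiny FreqDist. With counts {'a': 3, 'b': 1}, N=4 and
# B=2, so Laplace gives (c+1)/(N+B) and ELE gives (c+0.5)/(N+B*0.5), matching
# the formulas in the docstrings above.
def _example_lidstone_family():
    fd = FreqDist(['a', 'a', 'a', 'b'])
    mle, lap, ele = MLEProbDist(fd), LaplaceProbDist(fd), ELEProbDist(fd)
    assert abs(mle.prob('a') - 3.0 / 4) < 1e-12
    assert abs(lap.prob('a') - 4.0 / 6) < 1e-12   # (3+1)/(4+2)
    assert abs(ele.prob('a') - 3.5 / 5) < 1e-12   # (3+0.5)/(4+1)
    return mle, lap, ele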
@compat.python_2_unicode_compatible
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
    distribution" and the "base frequency distribution."  The
    "heldout estimate" uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
r_Nr = base_fdist.r_Nr(bins)
Nr = [r_Nr[r] for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
        distribution. In particular, *estimate[r]* is *Tr[r]/(Nr[r].N)*.
        In the special case that *Nr[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
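# Illustrative usage sketch (not part of the original NLTK source): a heldout
# estimate built from two tiny frequency distributions. 'a' occurs twice in
# the base distribution, so its probability is Tr[2]/(Nr[2]*N) computed from
# the heldout counts: 1/(1*3).
def _example_heldout():
    base = FreqDist(['a', 'a', 'b'])
    heldout = FreqDist(['a', 'b', 'b'])
    hpd = HeldoutProbDist(base, heldout)
    assert abs(hpd.prob('a') - 1.0 / 3) < 1e-12
    assert abs(hpd.prob('b') - 2.0 / 3) < 1e-12
    return hpd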
@compat.python_2_unicode_compatible
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
    the experiment used to generate a set of frequency distributions.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([list(fd) for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
@compat.python_2_unicode_compatible
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
    allocates uniform probability mass to as yet unseen events by using the
    number of event types observed so far. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
        events by using the number of event types observed so far. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalizing factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()' % freqdist.B()
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
return (c / float(self._N + self._T) if c != 0 else self._P0)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
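# Illustrative usage sketch (not part of the original NLTK source): Witten-Bell
# smoothing with a bins value larger than the number of observed types, so
# unseen events share the reserved mass T/(Z*(N+T)).
def _example_witten_bell():
    fd = FreqDist(['a', 'a', 'b'])           # N=3 outcomes, T=2 types
    wb = WittenBellProbDist(fd, bins=4)      # Z = 4 - 2 = 2 unseen bins
    assert abs(wb.prob('a') - 2.0 / 5) < 1e-12              # c/(N+T)
    assert abs(wb.prob('unseen') - 2.0 / (2 * 5)) < 1e-12   # T/(Z*(N+T))
    return wb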
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# World War II. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# The Good-Turing method calculates the probability mass to assign to
# events with zero or low counts based on the number of events with
# higher counts. It does so by using the adjusted count *c\**:
#
# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
# - *things with frequency zero in training* = N(1) for c == 0
#
# where *c* is the original count, *N(i)* is the number of event types
# observed with count *i*. We can think of the count of unseen events as
# the count of events with frequency one (see Jurafsky & Martin 2nd Edition, p101).
#
# This method is problematic because the situation ``N(c+1) == 0``
# is quite common in the original Good-Turing estimation; smoothing or
# interpolation of *N(i)* values is essential in practice.
#
# Bill Gale and Geoffrey Sampson present a simple and effective approach,
# Simple Good-Turing. As a smoothing curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r as long as the difference between r and
# r* is more than 1.96 times the standard deviation, and to switch to r* once
# it is less than or equal to that threshold:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion;
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
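# Worked example of the adjusted count c* described above (illustrative, not
# part of the original NLTK source), using made-up counts of counts: with
# N(1)=10 and N(2)=4, a word seen once gets adjusted count (1+1)*N(2)/N(1).
def _example_good_turing_adjusted_count():
    Nc = {1: 10, 2: 4}      # hypothetical counts of counts
    c = 1
    c_star = (c + 1) * Nc[c + 1] / float(Nc[c])
    assert abs(c_star - 0.8) < 1e-12
    return c_star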
##//////////////////////////////////////////////////////
## Simple Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class SimpleGoodTuringProbDist(ProbDistI):
"""
    SimpleGoodTuringProbDist approximates the relationship between frequency
    and frequency of frequency by a straight line in log space, fitted with
    linear regression.
    Details of the Simple Good-Turing algorithm can be found in:
    - "Good-Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
    Given a set of pairs (xi, yi), where xi denotes the frequency and
    yi denotes the frequency of frequency, we want to minimize the
    squared deviations. E(x) and E(y) denote the means of xi and yi.
    - slope: b = sigma((xi-E(x))(yi-E(y))) / sigma((xi-E(x))(xi-E(x)))
    - intercept: a = E(y) - b.E(x)
"""
SUM_TO_ONE = False
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist``.B() + 1
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()+1' % (freqdist.B()+1)
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr_non_zero(self):
r_Nr = self._freqdist.r_Nr()
del r_Nr[0]
return r_Nr
def _r_Nr(self):
"""
        Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
nonzero = self._r_Nr_non_zero()
if not nonzero:
return [], []
return zip(*sorted(nonzero.items()))
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
        # For higher sample frequencies the data points become horizontal
# along line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
i = (r[j-1] if j > 0 else 0)
k = (2 * r[j] - i if j == len(r) - 1 else r[j+1])
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
self._slope = (xy_cov / x_var if x_var != 0 else 0.0)
if self._slope >= -1:
warnings.warn('SimpleGoodTuring did not find a proper best fit '
'line for smoothing probabilities of occurrences. '
'The probability estimates are likely to be '
'unreliable.')
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
Return the number of samples with count r.
:param r: The amount of frequency.
:type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0 :
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
    def check(self):
        # Sanity check: the mass reserved for unseen events plus the
        # renormalized mass of all seen counts should sum to (roughly) one.
        prob_sum = self._prob_measure(0)
        for r, nr in self._r_Nr_non_zero().items():
            prob_sum += nr * self._prob_measure(r) * self._renormal
        print("Probability Sum:", prob_sum)
        #assert abs(prob_sum - 1.0) < 1e-8, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
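# Illustrative usage sketch (not part of the original NLTK source): Simple
# Good-Turing on a very small FreqDist. Real applications use far larger
# samples; on data this small the regression step may emit the "did not find
# a proper best fit" warning defined above.
def _example_simple_good_turing():
    fd = FreqDist(['a', 'b', 'c', 'c', 'd', 'd', 'd'])
    sgt = SimpleGoodTuringProbDist(fd)       # bins defaults to fd.B() + 1
    p_seen = sgt.prob('d')
    p_unseen = sgt.prob('zzz')               # mass for the single unseen bin
    assert 0.0 <= p_seen <= 1.0 and 0.0 <= p_unseen <= 1.0
    return p_seen, p_unseen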
class MutableProbDist(ProbDistI):
"""
    A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = array.array(str("d"), [0.0]) * len(samples)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return 0.0
return (2**(self._data[i]) if self._logs else self._data[i])
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return float('-inf')
return (self._data[i] if self._logs else math.log(self._data[i], 2))
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
        to stop being a valid probability distribution - the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: is the probability already logged
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
self._data[i] = (prob if log else math.log(prob, 2))
else:
self._data[i] = (2**(prob) if log else prob)
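# Illustrative usage sketch (not part of the original NLTK source):
# MutableProbDist copies an existing distribution and then lets individual
# sample probabilities be overwritten; keeping the result normalized is the
# caller's responsibility.
def _example_mutable_probdist():
    base = UniformProbDist(['a', 'b'])
    mpd = MutableProbDist(base, ['a', 'b'], store_logs=False)
    mpd.update('a', 0.75, log=False)
    mpd.update('b', 0.25, log=False)
    assert abs(mpd.prob('a') - 0.75) < 1e-12
    return mpd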
##/////////////////////////////////////////////////////
## Kneser-Ney Probability Distribution
##//////////////////////////////////////////////////////
# This method for calculating probabilities was introduced in 1995 by Reinhard
# Kneser and Hermann Ney. It was meant to improve the accuracy of language
# models that use backing-off to deal with sparse data. The authors propose two
# ways of doing so: a marginal distribution constraint on the back-off
# distribution and a leave-one-out distribution. For a start, the first one is
# implemented as a class below.
#
# The idea behind a back-off n-gram model is that we have a series of
# frequency distributions for our n-grams so that in case we have not seen a
# given n-gram during training (and as a result have a 0 probability for it) we
# can 'back off' (hence the name!) and try testing whether we've seen the
# n-1-gram part of the n-gram in training.
#
# The novelty of Kneser and Ney's approach was that they decided to fiddle
# around with the way this latter, backed off probability was being calculated
# whereas their peers seemed to focus on the primary probability.
#
# The implementation below uses one of the techniques described in their paper
# titled "Improved backing-off for n-gram language modeling." In the same paper
# another technique is introduced to attempt to smooth the back-off
# distribution as well as the primary one. There is also a much-cited
# modification of this method proposed by Chen and Goodman.
#
# In order for the implementation of Kneser-Ney to be more efficient, some
# changes have been made to the original algorithm. Namely, the calculation of
# the normalizing function gamma has been significantly simplified and
# combined slightly differently with beta. None of these changes affect the
# nature of the algorithm, but instead aim to cut out unnecessary calculations
# and take advantage of storing and retrieving information in dictionaries
# where possible.
@compat.python_2_unicode_compatible
class KneserNeyProbDist(ProbDistI):
"""
    Kneser-Ney estimate of a probability distribution. This is a version of
    back-off that estimates how likely an n-gram is given that its (n-1)-gram
    prefix has been seen in training. Extends the ProbDistI interface; requires
    a trigram FreqDist instance to train on. Optionally, a discount value
    different from the default can be specified. The default discount is 0.75.
"""
def __init__(self, freqdist, bins=None, discount=0.75):
"""
:param freqdist: The trigram frequency distribution upon which to base
the estimation
:type freqdist: FreqDist
:param bins: Included for compatibility with nltk.tag.hmm
:type bins: int or float
:param discount: The discount applied when retrieving counts of
trigrams
:type discount: float (preferred, but can be set to int)
"""
if not bins:
self._bins = freqdist.B()
else:
self._bins = bins
self._D = discount
# cache for probability calculation
self._cache = {}
# internal bigram and trigram frequency distributions
self._bigrams = defaultdict(int)
self._trigrams = freqdist
# helper dictionaries used to calculate probabilities
self._wordtypes_after = defaultdict(float)
self._trigrams_contain = defaultdict(float)
self._wordtypes_before = defaultdict(float)
for w0, w1, w2 in freqdist:
self._bigrams[(w0,w1)] += freqdist[(w0, w1, w2)]
self._wordtypes_after[(w0,w1)] += 1
self._trigrams_contain[w1] += 1
self._wordtypes_before[(w1,w2)] += 1
def prob(self, trigram):
# sample must be a triple
if len(trigram) != 3:
raise ValueError('Expected an iterable with 3 members.')
trigram = tuple(trigram)
w0, w1, w2 = trigram
if trigram in self._cache:
return self._cache[trigram]
else:
# if the sample trigram was seen during training
if trigram in self._trigrams:
prob = (self._trigrams[trigram]
- self.discount())/self._bigrams[(w0, w1)]
# else if the 'rougher' environment was seen during training
elif (w0,w1) in self._bigrams and (w1,w2) in self._wordtypes_before:
aftr = self._wordtypes_after[(w0, w1)]
bfr = self._wordtypes_before[(w1, w2)]
# the probability left over from alphas
leftover_prob = ((aftr * self.discount())
/ self._bigrams[(w0, w1)])
# the beta (including normalization)
beta = bfr /(self._trigrams_contain[w1] - aftr)
prob = leftover_prob * beta
# else the sample was completely unseen during training
else:
prob = 0.0
self._cache[trigram] = prob
return prob
def discount(self):
"""
Return the value by which counts are discounted. By default set to 0.75.
:rtype: float
"""
return self._D
def set_discount(self, discount):
"""
Set the value by which counts are discounted to the value of discount.
:param discount: the new value to discount counts by
:type discount: float (preferred, but int possible)
:rtype: None
"""
self._D = discount
def samples(self):
return self._trigrams.keys()
def max(self):
return self._trigrams.max()
def __repr__(self):
'''
Return a string representation of this ProbDist
:rtype: str
'''
        return '<KneserNeyProbDist based on {0} trigrams>'.format(self._trigrams.N())
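# Illustrative usage sketch (not part of the original NLTK source):
# KneserNeyProbDist is trained on a FreqDist whose samples are trigram tuples;
# a trigram whose (w0, w1) context was never seen gets probability zero.
def _example_kneser_ney():
    trigrams = [('the', 'dog', 'ran'), ('the', 'dog', 'ran'),
                ('the', 'dog', 'sat'), ('a', 'dog', 'sat')]
    kn = KneserNeyProbDist(FreqDist(trigrams))
    p_seen = kn.prob(('the', 'dog', 'ran'))      # (2 - 0.75) / 3
    p_unseen = kn.prob(('big', 'red', 'ball'))   # completely unseen context
    assert p_unseen == 0.0 and 0.0 < p_seen < 1.0
    return kn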
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
for s in actual_pdist)
def entropy(pdist):
probs = (pdist.prob(s) for s in pdist.samples())
return -sum(p * math.log(p,2) for p in probs)
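# Illustrative usage sketch (not part of the original NLTK source): the
# entropy of a uniform distribution over four outcomes is exactly 2 bits.
def _example_entropy():
    pd = UniformProbDist(['a', 'b', 'c', 'd'])
    assert abs(entropy(pd) - 2.0) < 1e-12
    return entropy(pd)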
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition][word] += 1
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
FreqDist({'the': 3, 'dog': 2, 'not': 1})
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond][sample] += 1
def __reduce__(self):
kv_pairs = ((cond, self[cond]) for cond in self.conditions())
return (self.__class__, (), None, None, kv_pairs)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return list(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in compat.itervalues(self))
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed.'
'See http://matplotlib.org/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = "%s" % condition
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len("%s" % c) for c in conditions)
print(' ' * condition_size, end=' ')
for s in samples:
print("%4s" % s, end=' ')
print()
for c in conditions:
print("%*s" % (condition_size, c), end=' ')
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print("%4d" % f, end=' ')
print()
# @total_ordering doesn't work here, since the class inherits from a builtin class
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<=", self, other)
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<", self, other)
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">=", self, other)
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">", self, other)
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
@compat.python_2_unicode_compatible
class ConditionalProbDistI(dict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return list(self.keys())
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modeling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.corpus import brown
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> cpdist['passed'].max()
'VBD'
>>> cpdist['passed'].prob('VBD')
0.423...
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
self._probdist_factory = probdist_factory
self._factory_args = factory_args
self._factory_kw_args = factory_kw_args
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
def __missing__(self, key):
self[key] = self._probdist_factory(FreqDist(),
*self._factory_args,
**self._factory_kw_args)
return self[key]
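# Illustrative sketch (not from the original source): because of __missing__ above,
# looking up an unseen condition lazily builds a fresh distribution from the stored
# factory rather than raising a KeyError, e.g.:
#
#     cpdist = ConditionalProbDist(ConditionalFreqDist(), MLEProbDist)
#     pdist = cpdist['never-seen-condition']   # created on demand from an empty FreqDist
#
# What pdist.prob() returns for an empty distribution depends on the chosen factory.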
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
self.update(probdist_dict)
def __missing__(self, key):
self[key] = DictionaryProbDist()
return self[key]
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
def sum_logs(logs):
return (reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF)
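# Illustrative sketch (not from the original source): add_logs/sum_logs combine
# base-2 log probabilities without leaving log space, e.g.:
#
#     logx = math.log(0.25, 2)            # -2.0
#     logy = math.log(0.25, 2)            # -2.0
#     2 ** add_logs(logx, logy)           # ~0.5
#     2 ** sum_logs([logx, logy, logx])   # ~0.75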
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
def set_logprob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
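# Illustrative sketch (not from the original source): _get_kwarg pops a keyword
# argument when present, otherwise it returns the default, e.g.:
#
#     kwargs = {'cumulative': True}
#     _get_kwarg(kwargs, 'cumulative', False)   # True, and 'cumulative' is removed
#     _get_kwarg(kwargs, 'conditions', None)    # None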
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1 + numsamples) // 2) +
random.randint(0, numsamples // 2))
fdist[y] += 1
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1 + numsamples) // 2 + 1):
for y in range(0, numsamples // 2 + 1):
fdist[x+y] += 1
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
    distributions, and uses them to sample a random process with
``numsamples`` samples. Each frequency distribution is sampled
``numoutcomes`` times. These three frequency distributions are
then used to build six probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
frequency distributions.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes)))
print('='*9*(len(pdists)+2))
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
print('-'*9*(len(pdists)+2))
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print(FORMATSTR % val)
# Print the totals for each column (should all be 1.0)
zvals = list(zip(*vals))
sums = [sum(val) for val in zvals[1:]]
print('-'*9*(len(pdists)+2))
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print(FORMATSTR % tuple(sums))
print('='*9*(len(pdists)+2))
# Display the distributions themselves, if they're short enough.
if len("%s" % fdist1) < 70:
print(' fdist1: %s' % fdist1)
print(' fdist2: %s' % fdist2)
print(' fdist3: %s' % fdist3)
print()
print('Generating:')
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
print()
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
sgt = SimpleGoodTuringProbDist(fd)
print('%18s %8s %14s' \
% ("word", "freqency", "SimpleGoodTuring"))
fd_keys_sorted=(key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True))
for key in fd_keys_sorted:
print('%18s %8d %14e' \
% (key, fd[key], sgt.prob(key)))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'KneserNeyProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| mit |
soxofaan/luigi | luigi/contrib/pai.py | 2 | 11175 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Open Targets
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Microsoft OpenPAI Job wrapper for Luigi.
"OpenPAI is an open source platform that provides complete AI model training and resource management capabilities,
it is easy to extend and supports on-premise, cloud and hybrid environments in various scale."
For more information about OpenPAI : https://github.com/Microsoft/pai/, this task is tested against OpenPAI 0.7.1
Requires:
- requests: ``pip install requests``
Written and maintained by Liu, Dongqing (@liudongqing).
"""
import time
import logging
import luigi
import abc
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import json
logger = logging.getLogger('luigi-interface')
try:
import requests as rs
from requests.exceptions import HTTPError
except ImportError:
logger.warning('requests is not installed. PaiTask requires requests.')
def slot_to_dict(o):
o_dict = {}
for key in o.__slots__:
if not key.startswith('__'):
value = getattr(o, key, None)
if value is not None:
o_dict[key] = value
return o_dict
class PaiJob(object):
"""
The Open PAI job definition.
Refer to here https://github.com/Microsoft/pai/blob/master/docs/job_tutorial.md
::
{
"jobName": String,
"image": String,
"authFile": String,
"dataDir": String,
"outputDir": String,
"codeDir": String,
"virtualCluster": String,
"taskRoles": [
{
"name": String,
"taskNumber": Integer,
"cpuNumber": Integer,
"memoryMB": Integer,
"shmMB": Integer,
"gpuNumber": Integer,
"portList": [
{
"label": String,
"beginAt": Integer,
"portNumber": Integer
}
],
"command": String,
"minFailedTaskCount": Integer,
"minSucceededTaskCount": Integer
}
],
"gpuType": String,
"retryCount": Integer
}
"""
__slots__ = (
'jobName', 'image', 'authFile', 'dataDir', 'outputDir', 'codeDir', 'virtualCluster',
'taskRoles', 'gpuType', 'retryCount'
)
def __init__(self, jobName, image, tasks):
"""
Initialize a Job with required fields.
        :param jobName: Name for the job, needs to be unique
        :param image: URL pointing to the Docker image for all tasks in the job
        :param tasks: List of taskRole objects, at least one task role is required
"""
self.jobName = jobName
self.image = image
if isinstance(tasks, list) and len(tasks) != 0:
self.taskRoles = tasks
else:
            raise TypeError('you must specify at least one task.')
class Port(object):
__slots__ = ('label', 'beginAt', 'portNumber')
def __init__(self, label, begin_at=0, port_number=1):
"""
The Port definition for TaskRole
:param label: Label name for the port type, required
:param begin_at: The port to begin with in the port type, 0 for random selection, required
:param port_number: Number of ports for the specific type, required
"""
self.label = label
self.beginAt = begin_at
self.portNumber = port_number
class TaskRole(object):
__slots__ = (
'name', 'taskNumber', 'cpuNumber', 'memoryMB', 'shmMB', 'gpuNumber', 'portList', 'command',
'minFailedTaskCount', 'minSucceededTaskCount'
)
def __init__(self, name, command, taskNumber=1, cpuNumber=1, memoryMB=2048, shmMB=64, gpuNumber=0, portList=[]):
"""
The TaskRole of PAI
        :param name: Name for the task role, needs to be unique among task roles, required
        :param command: Executable command for tasks in the task role, cannot be empty, required
:param taskNumber: Number of tasks for the task role, no less than 1, required
:param cpuNumber: CPU number for one task in the task role, no less than 1, required
:param shmMB: Shared memory for one task in the task role, no more than memory size, required
:param memoryMB: Memory for one task in the task role, no less than 100, required
:param gpuNumber: GPU number for one task in the task role, no less than 0, required
:param portList: List of portType to use, optional
"""
self.name = name
self.command = command
self.taskNumber = taskNumber
self.cpuNumber = cpuNumber
self.memoryMB = memoryMB
self.shmMB = shmMB
self.gpuNumber = gpuNumber
self.portList = portList
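# Illustrative sketch (not part of the original module): composing a job from the
# helpers above; the image name and command below are hypothetical placeholders.
#
#     task = TaskRole(name='train', command='python train.py', gpuNumber=1)
#     job = PaiJob(jobName='luigi-pai-example', image='my/docker-image', tasks=[task])
#     json.dumps(job, default=slot_to_dict)   # payload shape sent to the REST API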
class OpenPai(luigi.Config):
pai_url = luigi.Parameter(
default='http://127.0.0.1:9186',
description='rest server url, default is http://127.0.0.1:9186')
username = luigi.Parameter(
default='admin',
description='your username')
password = luigi.Parameter(
default=None,
description='your password')
expiration = luigi.IntParameter(
default=3600,
description='expiration time in seconds')
class PaiTask(luigi.Task):
__POLL_TIME = 5
@abc.abstractproperty
def name(self):
"""Name for the job, need to be unique, required"""
return 'SklearnExample'
@abc.abstractproperty
def image(self):
"""URL pointing to the Docker image for all tasks in the job, required"""
return 'openpai/pai.example.sklearn'
@abc.abstractproperty
def tasks(self):
"""List of taskRole, one task role at least, required"""
return []
@property
def auth_file_path(self):
"""Docker registry authentication file existing on HDFS, optional"""
return None
@property
def data_dir(self):
"""Data directory existing on HDFS, optional"""
return None
@property
def code_dir(self):
"""Code directory existing on HDFS, should not contain any data and should be less than 200MB, optional"""
return None
@property
def output_dir(self):
"""Output directory on HDFS, $PAI_DEFAULT_FS_URI/$jobName/output will be used if not specified, optional"""
return '$PAI_DEFAULT_FS_URI/{0}/output'.format(self.name)
@property
def virtual_cluster(self):
"""The virtual cluster job runs on. If omitted, the job will run on default virtual cluster, optional"""
return 'default'
@property
def gpu_type(self):
"""Specify the GPU type to be used in the tasks. If omitted, the job will run on any gpu type, optional"""
return None
@property
def retry_count(self):
"""Job retry count, no less than 0, optional"""
return 0
def __init_token(self):
self.__openpai = OpenPai()
request_json = json.dumps({'username': self.__openpai.username, 'password': self.__openpai.password,
'expiration': self.__openpai.expiration})
logger.debug('Get token request {0}'.format(request_json))
response = rs.post(urljoin(self.__openpai.pai_url, '/api/v1/token'),
headers={'Content-Type': 'application/json'}, data=request_json)
logger.debug('Get token response {0}'.format(response.text))
if response.status_code != 200:
msg = 'Get token request failed, response is {}'.format(response.text)
logger.error(msg)
raise Exception(msg)
else:
self.__token = response.json()['token']
def __init__(self, *args, **kwargs):
"""
:param pai_url: The rest server url of PAI clusters, default is 'http://127.0.0.1:9186'.
        :param token: The token used to authenticate against the PAI rest server.
"""
super(PaiTask, self).__init__(*args, **kwargs)
self.__init_token()
def __check_job_status(self):
response = rs.get(urljoin(self.__openpai.pai_url, '/api/v1/jobs/{0}'.format(self.name)))
logger.debug('Check job response {0}'.format(response.text))
if response.status_code == 404:
msg = 'Job {0} is not found'.format(self.name)
logger.debug(msg)
raise HTTPError(msg, response=response)
elif response.status_code != 200:
msg = 'Get job request failed, response is {}'.format(response.text)
logger.error(msg)
raise HTTPError(msg, response=response)
job_state = response.json()['jobStatus']['state']
if job_state in ['UNKNOWN', 'WAITING', 'RUNNING']:
logger.debug('Job {0} is running in state {1}'.format(self.name, job_state))
return False
else:
msg = 'Job {0} finished in state {1}'.format(self.name, job_state)
logger.info(msg)
if job_state == 'SUCCEED':
return True
else:
raise RuntimeError(msg)
def run(self):
job = PaiJob(self.name, self.image, self.tasks)
job.virtualCluster = self.virtual_cluster
job.authFile = self.auth_file_path
job.codeDir = self.code_dir
job.dataDir = self.data_dir
job.outputDir = self.output_dir
job.retryCount = self.retry_count
job.gpuType = self.gpu_type
request_json = json.dumps(job, default=slot_to_dict)
logger.debug('Submit job request {0}'.format(request_json))
response = rs.post(urljoin(self.__openpai.pai_url, '/api/v1/jobs'),
headers={'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(self.__token)}, data=request_json)
logger.debug('Submit job response {0}'.format(response.text))
# 202 is success for job submission, see https://github.com/Microsoft/pai/blob/master/docs/rest-server/API.md
if response.status_code != 202:
msg = 'Submit job failed, response code is {0}, body is {1}'.format(response.status_code, response.text)
logger.error(msg)
raise HTTPError(msg, response=response)
while not self.__check_job_status():
time.sleep(self.__POLL_TIME)
    def output(self):
        import luigi.contrib.hdfs  # needed for HdfsTarget; not imported at module level
        # calling self.output() here would recurse forever; use the output directory
        return luigi.contrib.hdfs.HdfsTarget(self.output_dir)
def complete(self):
try:
return self.__check_job_status()
except HTTPError:
return False
except RuntimeError:
return False
| apache-2.0 |
scienceopen/msise00 | Examples/suntest.py | 1 | 1208 | #!/usr/bin/env python
"""
understanding the sun's apparent position over Earth in geodetic lat, lon
"""
from astropy.time import Time
from astropy.coordinates import get_sun, AltAz, EarthLocation
import astropy.units as u
import numpy as np
from matplotlib.pyplot import figure, show
from pymap3d import aer2geodetic
obslla = (0, 0, 0)
midnight = Time("2015-01-01T00:00")
delta_midnight = np.linspace(0, 365, 1000) * u.day
time = midnight + delta_midnight
obs = EarthLocation(lat=obslla[0], lon=obslla[1], height=obslla[2])
sun = get_sun(time=time)
aaf = AltAz(obstime=time, location=obs)
sloc = sun.transform_to(aaf)
# %%
time = time.to_datetime()
fg = figure()
ax = fg.subplots(2, 1, sharex=True)
ax[0].plot(time, sloc.alt)
ax[0].set_title("sun elevation")
ax[0].set_ylabel("elevation [deg]")
ax[1].plot(time, sloc.az)
ax[1].set_title("sun azimuth")
ax[1].set_ylabel("azimuth [deg]")
ax[1].set_xlabel("time")
fg.suptitle(f"sun over 1 year @ lat,lon,alt: {obslla}")
# %%
lat, lon, alt = aer2geodetic(sloc.az.value, sloc.alt.value, sloc.distance.value, *obslla)
ax = figure().gca()
ax.plot(time, lat)
ax.set_title("subsolar latitude vs. time")
ax.set_ylabel("latitude [deg]")
ax.set_xlabel("time")
show()
| mit |
harshaneelhg/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
rsignell-usgs/notebook | ERDDAP/ERDDAP_MPALA.py | 1 | 2117 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# #Read data from MPALA ERDDAP
# Exploring use of Python to formulate ERDDAP data requests and process the responses.
# <codecell>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# <markdowncell>
# If you go to the ERDDAP TableDap page, you can select which variables you want, the time ranges, etc, and then select how you want to download the data. You can either then download the data, or just copy the URL that would download the data. That URL can therefore be used as the basis of a custom data query, as shown below. We simply generated a URL, then replaced the time and data requested with python variables.
#
# Note: If you just see a blank box below, you might have to tell your browser to "allow unsafe script". In Chrome it's a little shield that shows up on the right hand side of the address bar.
# <codecell>
from IPython.display import IFrame
IFrame('http://geoport.whoi.edu/erddap/tabledap/tower_65ce_ba2b_9a66.html', width='100%', height=450)
# <codecell>
#select the variables you want
vars='Tsoil10cmGrass_Avg,Tsoil20cmGass_Avg'
# Use ERDDAP's built-in relative time functionality to get last 48 hours:
start='now-7days'
stop='now'
# or specify a specific period:
start = '2013-05-06T00:00:00Z'
stop = '2013-06-07T00:00:00Z'
# <codecell>
#construct the ERDDAP URL
url='http://geoport.whoi.edu/erddap/tabledap/tower_65ce_ba2b_9a66.csvp?\
time,%s&time>=%s&time<=%s' % (vars,start,stop)
df = pd.read_csv(url,index_col=0,parse_dates=True)
# <codecell>
df.plot(figsize=(12,4));
# <codecell>
# List last ten records
df.tail(10)
# <codecell>
df.describe()
# <markdowncell>
# Use ERDDAP to make a plot
# <codecell>
from IPython.display import Image
url='http://geoport.whoi.edu/erddap/tabledap/tower_65ce_ba2b_9a66.png?time,TsoilOpen_Avg&time%3E=2014-08-27T00:00:00Z&time%3C=2014-09-03T00:00:00Z&.draw=lines&.color=0x000000'
Image(url=url,format=u'png')
| mit |
bbfamily/abu | abupy/MLBu/ABuMLBinsCs.py | 1 | 4273 | # -*- encoding:utf-8 -*-
"""直观可视化制作qcut的bins点"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import math
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import itertools
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import filter
__all__ = ['show_orders_hist']
def show_orders_hist(df, feature_columns, show=True, only_hist=True, show_pie=False):
"""
    Visualize histograms or pie charts for the features of df named in the
    feature_columns sequence, and optionally print pd.qcut bin statistics
    depending on the only_hist parameter.
    eg:
        from abupy import AbuML, ml
        ttn_raw = AbuML.load_ttn_raw_df()
        ml.show_orders_hist(ttn_raw, ['Age', 'Fare', 'Pclass'])
    :param df: pd.DataFrame object
    :param feature_columns: sequence of feature names, eg: ['Age', 'Fare', 'Pclass']
    :param show: whether to plot the histograms or pie charts
    :param show_pie: whether to prefer pie charts when feasible, default False
    :param only_hist: if True, only draw the plots and skip the pd.qcut statistics output
"""
if not isinstance(df, pd.DataFrame):
        logging.info('df must be a pd.DataFrame, not type {}'.format(type(df)))
return
    # Step 1: drop feature_columns entries that are not columns of df
feature_columns = list(filter(lambda x: df.columns.tolist().count(x) > 0, feature_columns))
    # Step 2: drop feature_columns entries whose column dtype is not int or float
feature_columns = list(
filter(
lambda x: df[x].dtype == int or df[x].dtype == float or df[x].dtype == np.uint or df[x].dtype == np.uint8,
feature_columns))
    # Step 3: drop feature columns with a single unique value, eg: a column of all 1s or all 0s cannot be binned
feature_columns = list(filter(lambda x: len(np.unique(df[x])) > 1, feature_columns))
axs_list = None
if len(feature_columns) == 0:
        # Nothing usable left after filtering, log and return
        logging.info('{}\n{}\nno feature columns exist, or unique==1, or dtype != int or float'.format(
df.columns, df.dtypes))
return
if show:
        # When plotting, first decide the number of subplot rows, two plots per row, using math.ceil, eg: ceil(3 / 2) = 2
n_rows = int(math.ceil(len(feature_columns) / 2))
        # Each row is 5 units tall, so total height: n_rows * 5
fig_h = n_rows * 5
        # Create the subplot canvas with plt.subplots
_, axs = plt.subplots(nrows=n_rows, ncols=2, figsize=(14, fig_h))
        # If there is more than one row, flatten the axes into a 1d sequence
axs_list = axs if n_rows == 1 else list(itertools.chain.from_iterable(axs))
for ind, feature in enumerate(feature_columns):
feature_unique = len(np.unique(df[feature]))
ax = None
if axs_list is not None:
ax = axs_list[ind]
ax.set_title(feature)
if show_pie and feature_unique < 10:
                # If the feature has fewer than 10 unique values, draw a pie chart directly from value_counts
df[feature].value_counts().plot(ax=ax, kind='pie')
else:
                # Otherwise draw a histogram
bins = int(feature_unique / 50) if feature_unique / 50 > 10 else 10
df[feature].hist(ax=ax, bins=bins)
if only_hist:
            # Only visualizing, so skip the qcut statistics
continue
try:
            # qcut into 10 equal-frequency bins
cats = pd.qcut(df[feature], 10)
except Exception:
            # qcut can fail when values exceed the quantile count; fall back to manual bins
import pandas.core.algorithms as algos
bins = algos.quantile(np.unique(df[feature]), np.linspace(0, 1, 10 + 1))
# noinspection PyProtectedMember,PyUnresolvedReferences
cats = pd.tools.tile._bins_to_cuts(df[feature], bins, include_lowest=True)
logging.info('{0} show hist and qcuts'.format(feature))
"""
Age show hist and qcuts
(31.8, 36] 91
(14, 19] 87
(41, 50] 78
[0.42, 14] 77
(22, 25] 70
(19, 22] 67
(28, 31.8] 66
(50, 80] 64
(25, 28] 61
(36, 41] 53
Name: Age, dtype: int64
"""
logging.info(cats.value_counts())
| gpl-3.0 |
DonBeo/scikit-learn | sklearn/mixture/gmm.py | 9 | 27514 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
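# Illustrative sketch (not part of the original module): for X with shape
# (n_samples, n_features), the returned array has shape (n_samples, n_components):
#
#     X = np.array([[0., 0.], [1., 1.]])
#     means = np.array([[0., 0.], [5., 5.]])
#     covars = np.ones((2, 2))                   # per-component 'diag' covariances
#     log_multivariate_normal_density(X, means, covars, 'diag').shape   # (2, 2)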
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
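# Illustrative sketch (not part of the original module): samples are returned with
# shape (n_features, n_samples), matching the docstring above:
#
#     rng = np.random.RandomState(0)
#     X = sample_gaussian(np.zeros(2), np.ones(2), 'diag', n_samples=5,
#                         random_state=rng)
#     X.shape   # (2, 5)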
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. the best results is kept
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on `cvtype`::
(`n_states`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_states`, `n_features`) if 'diag',
(`n_states`, `n_features`, `n_features`) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X, y=None):
"""Estimate model parameters with the expectation-maximization
algorithm.
A initialization step is performed before entering the em
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when dreprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
humm/dmpbbo | src/bbo/plotting/plotUpdateSummary.py | 2 | 4764 | import sys
import numpy
import matplotlib.pyplot as plt
from pylab import *
import numpy as np
import os
import matplotlib.pyplot as pl
from matplotlib.patches import Ellipse
import time
#from matplotlib import animation
# From https://github.com/dfm/dfmplot/blob/master/dfmplot/ellipse.py
def plot_error_ellipse(mu, cov, ax=None, **kwargs):
"""
    Plot the error ellipse at a point given its covariance matrix
Parameters
----------
mu : array (2,)
The center of the ellipse
cov : array (2,2)
The covariance matrix for the point
ax : matplotlib.Axes, optional
The axis to overplot on
**kwargs : dict
These keywords are passed to matplotlib.patches.Ellipse
"""
# some sane defaults
facecolor = kwargs.pop('facecolor', 'none')
edgecolor = kwargs.pop('edgecolor', 'k')
x, y = mu
U,S,V = np.linalg.svd(cov)
theta = np.degrees(np.arctan2(U[1,0], U[0,0]))
ellipsePlot = Ellipse(xy=[x, y],
width = 2*np.sqrt(S[0]),
height= 2*np.sqrt(S[1]),
angle=theta,
facecolor=facecolor, edgecolor=edgecolor, **kwargs)
if ax is None:
ax = pl.gca()
lines = ax.add_patch(ellipsePlot)
return lines
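# Illustrative sketch (not part of the original script): drawing the 1-sigma
# ellipse of an isotropic 2-D Gaussian centred at the origin.
#
#     fig = plt.figure()
#     ax = fig.gca()
#     plot_error_ellipse(np.zeros(2), np.eye(2), ax=ax, edgecolor='blue')
#     plt.show()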
def plotUpdateSummary(distribution_mean,distribution_covar,samples,costs,weights,distribution_new_mean,distribution_new_covar,ax,highlight=True,plot_samples=False):
n_dims = len(distribution_mean);
if (n_dims==1):
print "Sorry, only know how to plot for n_dims==2, but you provided n_dims==1"
return
if (n_dims>2):
#print "Sorry, only know how to plot for n_dims==2, throwing away excess dimensions"
distribution_mean = distribution_mean[0:2]
distribution_covar = distribution_covar[0:2,0:2]
distribution_new_mean = distribution_new_mean[0:2]
distribution_new_covar = distribution_new_covar[0:2,0:2]
samples = samples[:,0:2]
if (plot_samples):
max_marker_size = 80;
for ii in range(len(weights)):
cur_marker_size = max_marker_size*weights[ii]
sample_handle = ax.plot(samples[ii,0],samples[ii,1],'o',color='green')
plt.setp(sample_handle,markersize=cur_marker_size,markerfacecolor=(0.5,0.8,0.5),markeredgecolor='none')
ax.plot(samples[:,0],samples[:,1],'.',color='black')
ax.plot((distribution_mean[0],distribution_new_mean[0]),(distribution_mean[1],distribution_new_mean[1]),'-',color='blue')
mean_handle = ax.plot(distribution_mean[0],distribution_mean[1],'o',label='old')
mean_handle_new = ax.plot(distribution_new_mean[0],distribution_new_mean[1],'o',label='new')
mean_handle_link = ax.plot([distribution_mean[0], distribution_new_mean[0]],[distribution_mean[1], distribution_new_mean[1]],'-')
patch = plot_error_ellipse(distribution_mean[0:2],distribution_covar[0:2,0:2],ax)
patch_new = plot_error_ellipse(distribution_new_mean[0:2],distribution_new_covar[0:2,0:2],ax)
if (highlight):
plt.setp(mean_handle,color='red')
plt.setp(mean_handle_new,color='blue')
plt.setp(patch,edgecolor='red')
plt.setp(patch_new,edgecolor='blue')
else:
plt.setp(mean_handle,color='gray')
plt.setp(mean_handle_new,color='gray')
plt.setp(patch,edgecolor='gray')
plt.setp(patch_new,edgecolor='gray')
plt.setp(mean_handle_link,color='gray')
ax.set_aspect('equal')
plt.rcParams['text.usetex']=True
ax.set_xlabel(r'$\theta_1$')
ax.set_ylabel(r'$\theta_2$')
return mean_handle,mean_handle_new,patch,patch_new
def plotUpdateSummaryFromDirectory(directory,ax,highlight=True,plot_samples=False):
# Read data
distribution_mean = np.loadtxt(directory+"/distribution_mean.txt")
distribution_covar = np.loadtxt(directory+"/distribution_covar.txt")
samples = np.loadtxt(directory+"/samples.txt")
costs = np.loadtxt(directory+"/costs.txt")
weights = np.loadtxt(directory+"/weights.txt")
distribution_new_mean = np.loadtxt(directory+"/distribution_new_mean.txt")
distribution_new_covar = np.loadtxt(directory+"/distribution_new_covar.txt")
plotUpdateSummary(distribution_mean,distribution_covar,samples,costs,weights,distribution_new_mean,distribution_new_covar,ax,highlight,plot_samples)
if __name__=='__main__':
# See if input directory was passed
if (len(sys.argv)==2):
directory = str(sys.argv[1])
else:
print '\nUsage: '+sys.argv[0]+' <directory>\n';
sys.exit()
fig = plt.figure()
ax = fig.gca()
highlight = True
plot_samples = True
plotUpdateSummaryFromDirectory(directory,ax,highlight,plot_samples)
plt.show()
| gpl-2.0 |
ronalcc/zipline | zipline/history/history.py | 20 | 12233 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import numpy as np
import pandas as pd
import re
from zipline.finance import trading
from zipline.finance.trading import with_environment
from zipline.errors import IncompatibleHistoryFrequency
def parse_freq_str(freq_str):
# TODO: Wish we were more aligned with pandas here.
num_str, unit_str = re.match('([0-9]+)([A-Za-z]+)', freq_str).groups()
return int(num_str), unit_str
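# Illustrative sketch (not part of the original module): parse_freq_str splits a
# frequency string into (count, unit), e.g.:
#
#     parse_freq_str('1d')   # (1, 'd')
#     parse_freq_str('5m')   # (5, 'm')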
class Frequency(object):
"""
Represents how the data is sampled, as specified by the algoscript
via units like "1d", "1m", etc.
Currently only two frequencies are supported, "1d" and "1m"
- "1d" provides data at daily frequency, with the latest bar aggregating
the elapsed minutes of the (incomplete) current day
- "1m" provides data at minute frequency
"""
SUPPORTED_FREQUENCIES = frozenset({'1d', '1m'})
MAX_MINUTES = {'m': 1, 'd': 390}
MAX_DAYS = {'d': 1}
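    # Illustrative sketch (not part of the original module):
    #     freq = Frequency('1d', data_frequency='minute')
    #     freq.num, freq.unit_str             # -> (1, 'd')
    #     freq.max_minutes                    # -> 390, minutes in a full trading day
    #     Frequency('1m', 'minute').max_bars  # -> 1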
def __init__(self, freq_str, data_frequency):
if freq_str not in self.SUPPORTED_FREQUENCIES:
raise ValueError(
"history frequency must be in {supported}".format(
supported=self.SUPPORTED_FREQUENCIES,
))
        # The string that the algoscript specifies.
# Hold onto to use a key for caching.
self.freq_str = freq_str
# num - The number of units of the frequency.
# unit_str - The unit type, e.g. 'd'
self.num, self.unit_str = parse_freq_str(freq_str)
self.data_frequency = data_frequency
def next_window_start(self, previous_window_close):
"""
Get the first minute of the window starting after a window that
finished on @previous_window_close.
"""
if self.unit_str == 'd':
return self.next_day_window_start(previous_window_close,
self.data_frequency)
elif self.unit_str == 'm':
return self.next_minute_window_start(previous_window_close)
@staticmethod
def next_day_window_start(previous_window_close, data_frequency='minute'):
"""
Get the next day window start after @previous_window_close. This is
defined as the first market open strictly greater than
@previous_window_close.
"""
env = trading.environment
if data_frequency == 'daily':
next_open = env.next_trading_day(previous_window_close)
else:
next_open = env.next_market_minute(previous_window_close)
return next_open
@staticmethod
def next_minute_window_start(previous_window_close):
"""
Get the next minute window start after @previous_window_close. This is
defined as the first market minute strictly greater than
@previous_window_close.
"""
env = trading.environment
return env.next_market_minute(previous_window_close)
def window_open(self, window_close):
"""
For a period ending on `window_end`, calculate the date of the first
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_open(window_close, self.num)
elif self.unit_str == 'm':
return self.minute_window_open(window_close, self.num)
def window_close(self, window_start):
"""
For a period starting on `window_start`, calculate the date of the last
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_close(window_start, self.num)
elif self.unit_str == 'm':
return self.minute_window_close(window_start, self.num)
def day_window_open(self, window_close, num_days):
"""
Get the first minute for a daily window of length @num_days with last
minute @window_close. This is calculated by searching backward until
@num_days market_closes are encountered.
"""
env = trading.environment
open_ = env.open_close_window(
window_close,
1,
offset=-(num_days - 1)
).market_open.iloc[0]
if self.data_frequency == 'daily':
open_ = pd.tslib.normalize_date(open_)
return open_
def minute_window_open(self, window_close, num_minutes):
"""
Get the first minute for a minutely window of length @num_minutes with
last minute @window_close.
        This is defined as window_close if num_minutes == 1, and otherwise as
        the (N-1)st market minute before @window_close.
"""
if num_minutes == 1:
# Short circuit this case.
return window_close
env = trading.environment
return env.market_minute_window(window_close, count=-num_minutes)[-1]
def day_window_close(self, window_start, num_days):
"""
Get the window close for a daily frequency.
If the data_frequency is minute, then this will be the last minute of
last day of the window.
        If the data_frequency is daily, this will be midnight UTC of the last
day of the window.
"""
env = trading.environment
if self.data_frequency != 'daily':
return env.get_open_and_close(
env.add_trading_days(num_days - 1, window_start),
)[1]
return pd.tslib.normalize_date(
env.add_trading_days(num_days - 1, window_start),
)
def minute_window_close(self, window_start, num_minutes):
"""
Get the last minute for a minutely window of length @num_minutes with
first minute @window_start.
This is defined as window_start if num_minutes == 1, and otherwise as
the N-1st market minute after @window_start.
"""
if num_minutes == 1:
# Short circuit this case.
return window_start
env = trading.environment
return env.market_minute_window(window_start, count=num_minutes)[-1]
@with_environment()
def prev_bar(self, dt, env=None):
"""
Returns the previous bar for dt.
"""
if self.unit_str == 'd':
if self.data_frequency == 'minute':
def func(dt):
return env.get_open_and_close(
env.previous_trading_day(dt))[1]
else:
func = env.previous_trading_day
else:
func = env.previous_market_minute
# Cache the function dispatch.
self.prev_bar = func
return func(dt)
@property
def max_bars(self):
if self.data_frequency == 'daily':
return self.max_days
else:
return self.max_minutes
@property
def max_days(self):
if self.data_frequency != 'daily':
raise ValueError('max_days requested in minute mode')
return self.MAX_DAYS[self.unit_str] * self.num
@property
def max_minutes(self):
"""
The maximum number of minutes required to roll a bar at this frequency.
"""
if self.data_frequency != 'minute':
raise ValueError('max_minutes requested in daily mode')
return self.MAX_MINUTES[self.unit_str] * self.num
def normalize(self, dt):
if self.data_frequency != 'daily':
return dt
return pd.tslib.normalize_date(dt)
def __eq__(self, other):
return self.freq_str == other.freq_str
def __hash__(self):
return hash(self.freq_str)
def __repr__(self):
return ''.join([str(self.__class__.__name__),
"('", self.freq_str, "')"])
class HistorySpec(object):
"""
Maps to the parameters of the history() call made by the algoscript
    An object is used here so that get_history calls are not constantly
    parsing the parameters, and to provide values for caching and indexing
    into result frames.
"""
FORWARD_FILLABLE = frozenset({'price'})
@classmethod
def spec_key(cls, bar_count, freq_str, field, ffill):
"""
Used as a hash/key value for the HistorySpec.
"""
return "{0}:{1}:{2}:{3}".format(
bar_count, freq_str, field, ffill)
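    # Illustrative example (not part of the original module):
    #     HistorySpec.spec_key(20, '1d', 'price', True)   # -> '20:1d:price:True'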
def __init__(self, bar_count, frequency, field, ffill,
data_frequency='daily'):
# Number of bars to look back.
self.bar_count = bar_count
if isinstance(frequency, str):
frequency = Frequency(frequency, data_frequency)
if frequency.unit_str == 'm' and data_frequency == 'daily':
raise IncompatibleHistoryFrequency(
frequency=frequency.unit_str,
data_frequency=data_frequency,
)
# The frequency at which the data is sampled.
self.frequency = frequency
# The field, e.g. 'price', 'volume', etc.
self.field = field
# Whether or not to forward fill nan data. Only has an effect if this
# spec's field is in FORWARD_FILLABLE.
self._ffill = ffill
# Calculate the cache key string once.
self.key_str = self.spec_key(
bar_count, frequency.freq_str, field, ffill)
@property
def ffill(self):
"""
Wrapper around self._ffill that returns False for fields which are not
forward-fillable.
"""
return self._ffill and self.field in self.FORWARD_FILLABLE
def __repr__(self):
return ''.join([self.__class__.__name__, "('", self.key_str, "')"])
def days_index_at_dt(history_spec, algo_dt):
"""
Get the index of a frame to be used for a get_history call with daily
frequency.
"""
env = trading.environment
# Get the previous (bar_count - 1) days' worth of market closes.
day_delta = (history_spec.bar_count - 1) * history_spec.frequency.num
market_closes = env.open_close_window(
algo_dt,
day_delta,
offset=(-day_delta),
step=history_spec.frequency.num,
).market_close
if history_spec.frequency.data_frequency == 'daily':
market_closes = market_closes.apply(pd.tslib.normalize_date)
# Append the current algo_dt as the last index value.
# Using the 'rawer' numpy array values here because of a bottleneck
# that appeared when using DatetimeIndex
return np.append(market_closes.values, algo_dt)
def minutes_index_at_dt(history_spec, algo_dt):
"""
Get the index of a frame to be used for a get_history_call with minutely
frequency.
"""
# TODO: This is almost certainly going to be too slow for production.
env = trading.environment
return env.market_minute_window(
algo_dt,
history_spec.bar_count,
step=-1,
)[::-1]
def index_at_dt(history_spec, algo_dt):
"""
Returns index of a frame returned by get_history() with the given
history_spec and algo_dt.
The resulting index will have @history_spec.bar_count bars, increasing in
units of @history_spec.frequency, terminating at the given @algo_dt.
Note: The last bar of the returned frame represents an as-of-yet incomplete
time window, so the delta between the last and second-to-last bars is
    usually less than `@history_spec.frequency` for frequencies greater
than 1m.
"""
frequency = history_spec.frequency
if frequency.unit_str == 'd':
return days_index_at_dt(history_spec, algo_dt)
elif frequency.unit_str == 'm':
return minutes_index_at_dt(history_spec, algo_dt)
| apache-2.0 |
lcameron05/PCWG | pcwg/gui/power_curve.py | 2 | 4979 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 10 14:20:11 2016
@author: Stuart
"""
import base_dialog
import Tkinter as tk
import validation
import pandas as pd
from grid_box import DialogGridBox
from ..configuration.power_curve_configuration import PowerCurveLevel
from ..exceptions.handling import ExceptionHandler
from ..core.status import Status
class PowerCurveLevelDialog(base_dialog.BaseDialog):
def __init__(self, master, parent_dialog, item = None):
self.parent_dialog = parent_dialog
        self.isNew = (item is None)
if self.isNew:
self.item = PowerCurveLevel()
else:
self.item = item
base_dialog.BaseDialog.__init__(self, master)
def body(self, master):
self.prepareColumns(master)
self.addTitleRow(master, "Power Curve Level Settings:")
self.wind_speed = self.addEntry(master, "Wind Speed:", validation.ValidateNonNegativeFloat(master), self.item.wind_speed)
self.power = self.addEntry(master, "Power:", validation.ValidateNonNegativeFloat(master), self.item.power)
self.turbulence = self.addEntry(master, "Turbulence:", validation.ValidateNonNegativeFloat(master), self.item.turbulence)
def set_item_values(self):
self.item.wind_speed = float(self.wind_speed.get())
self.item.power = float(self.power.get())
self.item.turbulence = float(self.turbulence.get())
def apply(self):
self.set_item_values()
if self.isNew:
Status.add("Power Curve Level created")
else:
Status.add("Power Curve Level updated")
class PowerCurveLevelsGridBox(DialogGridBox):
def get_headers(self):
return ["Wind Speed", "Power", "Turbulence"]
def get_item_values(self, item):
values_dict = {}
values_dict["Wind Speed"] = item.wind_speed
values_dict["Power"] = item.power
values_dict["Turbulence"] = item.turbulence
return values_dict
def new_dialog(self, master, parent_dialog, item):
return PowerCurveLevelDialog(master, self.parent_dialog, item)
def size(self):
return self.item_count()
def get(self, index):
return self.get_items()[index]
def preprocess_sort_values(self, data):
return self.change_numeric(data)
class PowerCurveConfigurationDialog(base_dialog.BaseConfigurationDialog):
def getInitialFileName(self):
return "PowerCurve"
def addFormElements(self, master, path):
self.name = self.addEntry(master, "Name:", None, self.config.name, width = 60)
self.density = self.addEntry(master, "Reference Density:", validation.ValidateNonNegativeFloat(master), self.config.density)
self.power_curve_levels_grid_box = PowerCurveLevelsGridBox(master, self, self.row, self.inputColumn)
self.power_curve_levels_grid_box.add_items(self.config.power_curve_levels)
self.row += 1
self.validatedPowerCurveLevels = validation.ValidatePowerCurveLevels(master, self.power_curve_levels_grid_box)
self.validations.append(self.validatedPowerCurveLevels)
self.addPowerCurveLevelButton = tk.Button(master, text="Parse", command = self.parse_clipboard, width=5, height=1)
self.addPowerCurveLevelButton.grid(row=self.row, sticky=tk.E+tk.S, column=self.secondButtonColumn, pady=30)
def parse_clipboard(self):
clip_board_df = pd.read_clipboard()
if clip_board_df is None:
return
if len(clip_board_df.columns) < 2:
return
for index in clip_board_df.index:
self.add_clip_board_row(clip_board_df.ix[index])
self.validatedPowerCurveLevels.validate()
def add_clip_board_row(self, row):
if len(row) < 2:
return
try:
speed = float(row[0])
except:
speed = 0.0
try:
power = float(row[1])
except:
power = 0.0
if len(row) > 2:
if len(row[2]) > 0:
if row[2][-1] == '%':
turbulence = float(row[2][:-1]) * 0.01
else:
turbulence = float(row[2])
else:
turbulence = 0.1
else:
turbulence = 0.1
self.power_curve_levels_grid_box.add_item(PowerCurveLevel(speed, power, turbulence))
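    # Illustrative example (not part of the original module): a clipboard row of
    # ['5.0', '120.5', '12%'] parses to speed=5.0, power=120.5, turbulence=0.12;
    # a missing or empty turbulence column falls back to the default of 0.1.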
def setConfigValues(self):
self.config.name = self.name.get()
self.config.density = float(self.density.get())
self.config.power_curve_levels = self.power_curve_levels_grid_box.get_items()
| mit |
mahak/spark | python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py | 18 | 20955 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.rdd import PythonEvalType
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, mean, sum, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.types import ArrayType, TimestampType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return float(v + 1)
return plus_one
@property
def pandas_scalar_plus_two(self):
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
assert_frame_equal(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
assert_frame_equal(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf(ArrayType(TimestampType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar pandas UDF in
        # groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v)).sort('plus_one(id)')
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v)).sort('plus_one(id)')
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
# groupby one expression and one python UDF
result6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum_udf(df.v)).sort(['(v % 2)', 'plus_one(id)']))
expected6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum(df.v)).sort(['(v % 2)', 'plus_one(id)']))
# groupby one expression and one scalar pandas UDF
result7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum_udf(df.v)).sort(['sum(v)', 'plus_two(id)']))
expected7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum(df.v)).sort(['sum(v)', 'plus_two(id)']))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
assert_frame_equal(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
assert_frame_equal(expected2, result2)
assert_frame_equal(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEqual(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
expected = [Row(id=1, sum=5), Row(id=2, x=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda x: x.sum(),
'int', PandasUDFType.GROUPED_AGG)
result = df.groupBy('id').agg(f(df['x']).alias('sum')).collect()
self.assertEqual(result, expected)
def test_grouped_without_group_by_clause(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max_udf(v):
return v.max()
df = self.spark.range(0, 100)
self.spark.udf.register('max_udf', max_udf)
with self.tempView("table"):
df.createTempView('table')
agg1 = df.agg(max_udf(df['id']))
agg2 = self.spark.sql("select max_udf(id) from table")
assert_frame_equal(agg1.toPandas(), agg2.toPandas())
def test_no_predicate_pushdown_through(self):
# SPARK-30921: We should not pushdown predicates of PythonUDFs through Aggregate.
import numpy as np
@pandas_udf('float', PandasUDFType.GROUPED_AGG)
def mean(x):
return np.mean(x)
df = self.spark.createDataFrame([
Row(id=1, foo=42), Row(id=2, foo=1), Row(id=2, foo=2)
])
agg = df.groupBy('id').agg(mean('foo').alias("mean"))
filtered = agg.filter(agg['mean'] > 40.0)
assert(filtered.collect()[0]["mean"] == 42.0)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/doc/conf.py | 7 | 9408 | # -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import os.path as op
from datetime import date
import sphinxgallery
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = op.dirname(__file__)
sys.path.append(op.abspath(op.join(curdir, '..', 'mne')))
sys.path.append(op.abspath(op.join(curdir, 'sphinxext')))
import mne
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
import numpy_ext.numpydoc
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'numpy_ext.numpydoc',
# 'sphinx.ext.intersphinx',
# 'flow_diagram',
'sphinxgallery.gen_gallery']
autosummary_generate = True
autodoc_default_flags = ['inherited-members']
# extensions = ['sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'sphinx.ext.todo',
# 'sphinx.ext.pngmath',
# 'sphinx.ext.inheritance_diagram',
# 'numpydoc',
# 'ipython_console_highlighting',
# 'only_directives']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MNE'
copyright = u'2012-%s, MNE Developers' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['config_doc.rst']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
exclude_patterns = ['source/generated']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'navbar_title': ' ',
'source_link_position': "footer",
'bootswatch_theme': "flatly",
'navbar_sidebarrel': False,
'bootstrap_version': "3",
'navbar_links': [("Tutorials", "tutorials"),
("Gallery", "auto_examples/index"),
("Manual", "manual/index"),
("API", "python_reference"),
("FAQ", "faq"),
("Cite", "cite"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_images', sphinxgallery.glr_path_static()]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# variables to pass to HTML templating engine
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
'use_media_buttons': True, 'build_dev_html': build_dev_html}
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
# ('index', 'MNE.tex', u'MNE Manual',
# u'MNE Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
trim_doctests_flags = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
sphinxgallery_conf = {
'examples_dirs' : ['../examples', '../tutorials'],
'gallery_dirs' : ['auto_examples', 'auto_tutorials'],
'doc_module': ('sphinxgallery', 'numpy'),
'reference_url': {
'mne': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'mayavi': 'http://docs.enthought.com/mayavi/mayavi'},
'find_mayavi_figures': True,
'default_thumb_file': '_static/mne_helmet.png',
}
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
    """Compute barycenter weights of X from Z along the first axis
    We estimate the weights to assign to each point in Z[i] to recover
    the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
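# Illustrative sketch (not part of the original module): reconstructing a point
# that lies midway between its two neighbours gives weights of [0.5, 0.5]:
#     X = np.array([[1.0, 0.0]])
#     Z = np.array([[[0.0, 0.0], [2.0, 0.0]]])   # the two neighbours of X[0]
#     barycenter_weights(X, Z)                   # -> array([[0.5, 0.5]])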
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
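# Illustrative sketch (not part of the original module): the returned CSR matrix
# has one row per sample whose n_neighbors non-zero entries sum to ~1:
#     X = np.random.RandomState(0).rand(20, 3)
#     W = barycenter_kneighbors_graph(X, n_neighbors=5)
#     W.shape                            # -> (20, 20)
#     np.allclose(W.sum(axis=1), 1.0)    # -> True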
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
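# Illustrative sketch (not part of the original module): for a small symmetric
# matrix, null_space keeps the eigenvectors of the k smallest eigenvalues after
# skipping the k_skip very smallest, and returns the sum of the kept eigenvalues:
#     M = np.diag([0.0, 0.1, 0.2, 0.3])
#     vecs, err = null_space(M, k=2, k_skip=1, eigen_solver='dense')
#     vecs.shape    # -> (4, 2), eigenvectors for eigenvalues 0.1 and 0.2
#     err           # -> approximately 0.3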
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (W - I)' (W - I)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
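# Illustrative usage sketch (not part of the original module); the swiss-roll
# data set is just a convenient placeholder input:
#     from sklearn.datasets import make_swiss_roll
#     X, _ = make_swiss_roll(n_samples=500, random_state=0)
#     Y, err = locally_linear_embedding(X, n_neighbors=12, n_components=2)
#     Y.shape       # -> (500, 2)
#     err           # non-negative reconstruction error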
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
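# Editor's sketch (not part of the original module): a minimal, hedged usage example
# of LocallyLinearEmbedding.fit_transform and transform. The dataset, parameter
# values, and the helper name below are illustrative assumptions only.
def _example_lle_usage():
    from sklearn.datasets import make_swiss_roll
    from sklearn.manifold import LocallyLinearEmbedding

    X, _ = make_swiss_roll(n_samples=500, random_state=0)
    lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2)
    X_2d = lle.fit_transform(X)        # embed the training points
    X_new_2d = lle.transform(X[:5])    # map new points via barycenter weights
    return X_2d.shape, X_new_2d.shape  # (500, 2), (5, 2)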
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/ensemble/forest.py | 8 | 52992 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# License: BSD 3 clause
from __future__ import division
from itertools import chain
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array
from ..utils.validation import DataConversionWarning
from .base import BaseEnsemble, _partition_estimators
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
random_state = check_random_state(tree.random_state)
indices = random_state.randint(0, n_samples, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
tree.fit(X, y,
sample_weight=curr_sample_weight,
check_input=False)
tree.indices_ = sample_counts > 0.
else:
tree.fit(X, y,
sample_weight=sample_weight,
check_input=False)
return tree
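# Editor's sketch (not part of the original module): a small, hedged illustration of
# how the bootstrap draw above is folded into per-sample weights. The toy size, seed
# and helper name are assumptions chosen purely for illustration.
def _example_bootstrap_as_weights():
    import numpy as np

    rng = np.random.RandomState(0)
    n_samples = 8
    indices = rng.randint(0, n_samples, n_samples)             # draw with replacement
    sample_counts = np.bincount(indices, minlength=n_samples)  # times each row was drawn
    curr_sample_weight = np.ones(n_samples) * sample_counts
    # rows drawn k times get weight k; rows never drawn get weight 0 ("out of bag")
    return curr_sample_weight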
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = check_array(X, dtype=DTYPE)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree.tree_, 'apply', X)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Convert data
# ensure_2d=False because there are actually unit tests checking that we fail
# for 1d. FIXME make this consistent in the future.
X = check_array(X, dtype=DTYPE, ensure_2d=False)
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity; using
# [:, np.newaxis] instead would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y = self._validate_y(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y(self, y):
# Default implementation
return y
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs)(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / self.n_estimators
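# Editor's sketch (not part of the original module): a hedged example of reading the
# averaged feature importances from a fitted forest. Dataset, parameters and the
# helper name are illustrative assumptions.
def _example_feature_importances():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=200, n_features=6, n_informative=3,
                               random_state=0)
    forest = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)
    importances = forest.feature_importances_  # mean of the per-tree importances
    return importances.sum()                   # importances sum to ~1.0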
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def _set_oob_score(self, X, y):
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in xrange(self.n_outputs_):
predictions.append(np.zeros((n_samples,
n_classes_[k])))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict_proba(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in xrange(self.n_outputs_):
predictions[k][mask, :] += p_estimator[k]
for k in xrange(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y(self, y):
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
return y
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the majority
prediction of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
# ensure_2d=False because there are actually unit tests checking that we fail
# for 1d.
X = check_array(X, ensure_2d=False)
n_samples = len(X)
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in xrange(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in xrange(1, len(all_proba)):
for k in xrange(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in xrange(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
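# Editor's sketch (not part of the original module): a hedged check that predict()
# is the argmax of the averaged class probabilities mapped back through classes_,
# mirroring the logic above. Data, parameters and the helper name are assumptions.
def _example_predict_from_proba():
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=150, n_features=5, random_state=0)
    clf = RandomForestClassifier(n_estimators=25, random_state=0).fit(X, y)
    proba = clf.predict_proba(X)                          # mean of per-tree probabilities
    manual = clf.classes_.take(np.argmax(proba, axis=1))  # same rule as predict()
    assert np.array_equal(manual, clf.predict(X))
    return proba.shape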
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y: array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = check_array(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[mask, :] += p_estimator
n_predictions[mask, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in xrange(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
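# Editor's sketch (not part of the original module): a hedged example of the
# out-of-bag estimate computed above. The dataset, parameters and helper name are
# illustrative assumptions.
def _example_oob_score():
    from sklearn.datasets import make_regression
    from sklearn.ensemble import RandomForestRegressor

    X, y = make_regression(n_samples=300, n_features=8, noise=1.0, random_state=0)
    forest = RandomForestRegressor(n_estimators=100, bootstrap=True, oob_score=True,
                                   random_state=0).fit(X, y)
    # R^2 computed only from trees that did not see each sample
    return forest.oob_score_, forest.oob_prediction_.shape  # (float, (300,))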
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
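# Editor's sketch (not part of the original module): a hedged end-to-end usage
# example for RandomForestClassifier. Split sizes, parameters and the helper name
# are illustrative assumptions.
def _example_random_forest_classifier():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    X, y = make_classification(n_samples=300, n_features=10, random_state=0)
    X_train, X_test, y_train, y_test = X[:200], X[200:], y[:200], y[200:]
    clf = RandomForestClassifier(n_estimators=100, random_state=0)
    clf.fit(X_train, y_train)
    accuracy = clf.score(X_test, y_test)   # mean accuracy from ClassifierMixin
    proba = clf.predict_proba(X_test[:3])  # averaged class probabilities
    return accuracy, proba.shape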
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
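# Editor's sketch (not part of the original module): a hedged comparison of
# ExtraTreesClassifier with RandomForestClassifier on the same synthetic data.
# Data, split, parameters and the helper name are illustrative assumptions.
def _example_extra_trees_vs_random_forest():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier

    X, y = make_classification(n_samples=400, n_features=10, random_state=0)
    X_train, X_test, y_train, y_test = X[:300], X[300:], y[:300], y[300:]
    et = ExtraTreesClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)
    rf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)
    return et.score(X_test, y_test), rf.score(X_test, y_test)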
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as trees in the forest.
The dimensionality of the resulting representation is approximately
``n_estimators * 2 ** max_depth``.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
sparse_output: bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit tests checking that we fail
# for 1d.
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data to be transformed.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
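# Editor's sketch (not part of the original module): a hedged example of the
# one-hot leaf encoding produced by RandomTreesEmbedding. The data, parameters and
# helper name are illustrative assumptions.
def _example_random_trees_embedding():
    import numpy as np
    from sklearn.ensemble import RandomTreesEmbedding

    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    hasher = RandomTreesEmbedding(n_estimators=10, max_depth=3, random_state=0)
    X_sparse = hasher.fit_transform(X)      # CSR matrix of one-hot leaf indicators
    # each row has exactly one non-zero entry per tree (the leaf it falls into)
    return X_sparse.shape, X_sparse[0].nnz  # (100, <= 10 * 2**3), 10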
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
JT5D/scikit-learn | sklearn/linear_model/ridge.py | 2 | 37593 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asarray
from ..utils import compute_class_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import _deprecate_loss_and_score_funcs
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info != 0:
raise ValueError("Failed with error code %d" % info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_dense_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
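# Editor's sketch (not part of the original module): a hedged numpy-only rendering
# of the single-alpha normal-equations solve above, w = (X^T X + alpha*I)^-1 X^T y.
# Sizes, seed and the helper name are illustrative assumptions.
def _example_normal_equations():
    import numpy as np

    rng = np.random.RandomState(0)
    X, y, alpha = rng.randn(20, 5), rng.randn(20), 1.0
    A = X.T.dot(X) + alpha * np.eye(X.shape[1])  # X^T X with alpha on the diagonal
    return np.linalg.solve(A, X.T.dot(y))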
def _solve_dense_cholesky_kernel(K, y, alpha, sample_weight=None):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight != 1.0
if has_sw:
sw = np.sqrt(sample_weight)
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, so we can solve the multi-target problem in one go.
K.flat[::n_samples + 1] += alpha[0]
dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=True)
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
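# Editor's sketch (not part of the original module): a hedged check that the SVD
# path above, w = V diag(s / (s^2 + alpha)) U^T y, matches the normal-equations
# solution. Sizes, seed and the helper name are illustrative assumptions.
def _example_svd_vs_cholesky():
    import numpy as np

    rng = np.random.RandomState(0)
    X, y, alpha = rng.randn(20, 5), rng.randn(20), 1.0
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    w_svd = Vt.T.dot((s / (s ** 2 + alpha)) * U.T.dot(y))
    w_chol = np.linalg.solve(X.T.dot(X) + alpha * np.eye(5), X.T.dot(y))
    assert np.allclose(w_svd, w_chol)
    return w_svd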
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto',
max_iter=None, tol=1e-3):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'svd', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'dense_cholesky'.
- 'dense_cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'dense_cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All three solvers support both dense and sparse data.
tol: float
Precision of the solution.
Returns
-------
coef: array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = isinstance(sample_weight, np.ndarray) or sample_weight != 1.0
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if hasattr(X, '__array__'):
solver = 'dense_cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw and solver != "dense_cholesky":
warnings.warn("""sample_weight and class_weight not supported in %s,
falling back to dense_cholesky.""" % solver)
solver = 'dense_cholesky'
# There should be either 1 or n_targets penalties
alpha = safe_asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'dense_cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'dense_cholesky':
if n_features > n_samples or has_sw:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_dense_cholesky_kernel(K, y, alpha,
sample_weight)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
else:
try:
coef = _solve_dense_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
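# Editor's sketch (not part of the original module): a hedged example of calling
# ridge_regression directly (no intercept is fitted, as noted above). The data,
# alpha value and helper name are illustrative assumptions.
def _example_ridge_regression():
    import numpy as np
    from sklearn.linear_model import ridge_regression

    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    w_true = np.array([1.0, -2.0, 0.5, 0.0])
    y = X.dot(w_true) + 0.01 * rng.randn(50)
    coef = ridge_regression(X, y, alpha=1.0)  # shape (4,), close to w_true
    return coef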
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=1.0):
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'dense_cholesky'.
- 'dense_cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'dense_cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
The last three solvers support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict, optional
Weights associated with classes in the form
``{class_label : weight}``. If not given, all classes are
supposed to have weight one.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a singular value decomposition to obtain
the solution, 'dense_cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg, while 'auto' will choose the most
appropriate solver depending on the matrix X. 'lsqr' uses
the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr.
tol : float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.multilabel_:
y = column_or_1d(y, warn=True)
if self.class_weight:
cw = compute_class_weight(self.class_weight,
self.classes_, Y)
# get the class weight corresponding to each sample
sample_weight = cw[np.searchsorted(self.classes_, y)]
else:
sample_weight = 1.0
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
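# A minimal sketch (not used by the module) of RidgeClassifier on a toy
# three-class problem.  Because fit() binarizes y into a {-1, +1} indicator
# matrix, the multi-class case is handled through the multi-variate response
# support of ridge_regression, giving one weight vector per class.
def _sketch_ridge_classifier():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2) + offset
                   for offset in ([0.0, 0.0], [5.0, 5.0], [0.0, 5.0])])
    y = np.repeat([0, 1, 2], 20)
    clf = RidgeClassifier(alpha=1.0).fit(X, y)
    print(clf.coef_.shape)                # one row per class: (3, 2)
    print(np.mean(clf.predict(X) == y))   # training accuracy, close to 1.0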
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0],
fit_intercept=True, normalize=False,
scoring=None, score_func=None,
loss_func=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.score_func = score_func
self.loss_func = loss_func
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = _deprecate_loss_and_score_funcs(
self.loss_func, self.score_func, self.scoring,
score_overrides_loss=True
)
error = scorer is None
#error = self.score_func is None and self.loss_func is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(sample_weight * alpha, y, v, Q, QT_y)
else:
out, c = _values(sample_weight * alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
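# A numerical sanity check (not used by the module) of the leave-one-out
# identity quoted in the _RidgeGCV docstring: with K = X X^T,
# G = (K + alpha*Id)^-1 and c = G y, the LOO prediction errors equal
# c / diag(G).  The brute-force loop refits ridge with each sample held out.
def _sketch_loo_identity():
    import numpy as np
    from scipy import linalg
    rng = np.random.RandomState(0)
    n_samples, n_features, alpha = 8, 3, 1.0
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    G = linalg.inv(np.dot(X, X.T) + alpha * np.eye(n_samples))
    looe_fast = np.dot(G, y) / np.diag(G)
    looe_slow = np.empty(n_samples)
    for i in range(n_samples):
        mask = np.arange(n_samples) != i
        Xi, yi = X[mask], y[mask]
        w = linalg.solve(np.dot(Xi.T, Xi) + alpha * np.eye(n_features),
                         np.dot(Xi.T, yi))
        looe_slow[i] = y[i] - np.dot(X[i], w)
    print(np.allclose(looe_fast, looe_slow))  # expected: True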
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]),
fit_intercept=True, normalize=False, scoring=None,
score_func=None, loss_func=None, cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.score_func = score_func
self.loss_func = loss_func
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
score_func=self.score_func,
loss_func=self.loss_func,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Parameters
----------
alphas: numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features and X is not a sparse
matrix, otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
`cv_values_` : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
`coef_` : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
`alpha_` : float
Estimated regularization parameter.
`intercept_` : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
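# A minimal sketch (not used by the module) of RidgeCV: generalized
# cross-validation picks an alpha from the supplied grid, and with
# store_cv_values=True the per-sample CV values for every alpha are kept.
def _sketch_ridge_cv():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    y = np.dot(X, [1.0, 2.0, 0.0, -1.0]) + 0.1 * rng.randn(50)
    reg = RidgeCV(alphas=np.array([0.01, 0.1, 1.0, 10.0]),
                  store_cv_values=True).fit(X, y)
    print(reg.alpha_)              # alpha selected by GCV
    print(reg.cv_values_.shape)    # (n_samples, n_alphas) == (50, 4)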
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas: numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict, optional
Weights associated with classes in the form
``{class_label : weight}``. If not given, all classes are
supposed to have weight one.
Attributes
----------
`cv_values_` : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
`coef_` : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
`alpha_` : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
normalize=False, score_func=None, loss_func=None, cv=None,
class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
score_func=score_func, loss_func=loss_func, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=1.0, class_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
class_weight : dict, optional
Weights associated with classes in the form
``{class_label : weight}``. If not given, all classes are
supposed to have weight one. This parameter is deprecated.
Returns
-------
self : object
Returns self.
"""
if class_weight is None:
class_weight = self.class_weight
else:
warnings.warn("'class_weight' is now an initialization parameter."
" Using it in the 'fit' method is deprecated and "
"will be removed in 0.15.", DeprecationWarning,
stacklevel=2)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.multilabel_:
y = column_or_1d(y, warn=True)
cw = compute_class_weight(class_weight,
self.classes_, Y)
# modify the sample weights with the corresponding class weight
sample_weight *= cw[np.searchsorted(self.classes_, y)]
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
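# A minimal sketch (not used by the module) of RidgeClassifierCV on a toy
# binary problem: the regularization strength is chosen by generalized
# cross-validation before the ridge fit on the {-1, +1} encoded labels.
def _sketch_ridge_classifier_cv():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(25, 2), rng.randn(25, 2) + 4.0])
    y = np.repeat([0, 1], 25)
    clf = RidgeClassifierCV(alphas=np.array([0.1, 1.0, 10.0])).fit(X, y)
    print(clf.alpha_)                     # alpha chosen by GCV
    print(np.mean(clf.predict(X) == y))   # training accuracy, close to 1.0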
| bsd-3-clause |
2PacIsAlive/deepnet.works | deep_networks/data/dicom/plot.py | 2 | 1479 | import logging
#import pylab
import matplotlib.pyplot as plt
from skimage import measure
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
class Plotter(object):
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def plot_3d(self, image, threshold=-300):
"""
Adapted from:
https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial
"""
# Position the scan upright,
# so the head of the patient would be at the top facing the camera
p = image.transpose(2,1,0)
verts, faces = measure.marching_cubes(p, threshold)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=0.1)
face_color = [0.5, 0.5, 1]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.show()
def plot(dataset):
log.info("plotting {}".format(dataset.PatientsName))
#pylab.imshow(dataset.pixel_array, cmap=pylab.cm.bone)
def save(dataset):
log.info("saving {}".format(dataset.PatientsName))
plt.imshow(dataset.pixel_array, cmap=plt.cm.bone)
plt.savefig(dataset.PatientsName + '.png',
bbox_inches='tight')
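# A minimal sketch (not part of the original module): Plotter.plot_3d expects
# a 3-D intensity volume (for example a stacked CT scan) and renders the
# isosurface found at `threshold`.  The synthetic sphere below, with "tissue"
# at 400 and "air" at -1000, is only a stand-in for real DICOM data.
def _sketch_plot_3d():
    import numpy as np
    z, y, x = np.mgrid[-20:20, -20:20, -20:20]
    volume = np.where(x ** 2 + y ** 2 + z ** 2 < 15 ** 2, 400.0, -1000.0)
    Plotter().plot_3d(volume, threshold=-300)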
| mit |
rhattersley/cartopy | lib/cartopy/mpl/slippy_image_artist.py | 1 | 2694 | # (C) British Crown Copyright 2014 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Define the SlippyImageArtist class, which interfaces with
:class:`cartopy.io.RasterSource` instances at draw time, for interactive
dragging and zooming of raster data.
"""
from __future__ import (absolute_import, division, print_function)
from matplotlib.image import AxesImage
import matplotlib.artist
class SlippyImageArtist(AxesImage):
"""
A subclass of :class:`~matplotlib.image.AxesImage` which provides an
interface for getting a raster from the given object with interactive
slippy map type functionality.
Kwargs are passed to the AxesImage constructor.
"""
def __init__(self, ax, raster_source, **kwargs):
self.raster_source = raster_source
super(SlippyImageArtist, self).__init__(ax, **kwargs)
self.set_clip_path(ax.background_patch)
self.cache = []
ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
ax.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.on_release()
def on_press(self, event=None):
self.user_is_interacting = True
def on_release(self, event=None):
self.user_is_interacting = False
self.stale = True
@matplotlib.artist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
if not self.get_visible():
return
ax = self.axes
window_extent = ax.get_window_extent()
[x1, y1], [x2, y2] = ax.viewLim.get_points()
if not self.user_is_interacting:
located_images = self.raster_source.fetch_raster(
ax.projection, extent=[x1, x2, y1, y2],
target_resolution=(window_extent.width, window_extent.height))
self.cache = located_images
for img, extent in self.cache:
self.set_array(img)
with ax.hold_limits():
self.set_extent(extent)
super(SlippyImageArtist, self).draw(renderer, *args, **kwargs)
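# A minimal sketch (not part of the original module) of the raster-source
# contract that draw() above relies on: the object passed as raster_source
# only needs a fetch_raster(projection, extent=..., target_resolution=...)
# method returning an iterable of (image, extent) pairs.  The stub below
# fills that contract with random pixels; real code would use one of the
# raster sources shipped in cartopy.io instead.
class _RandomRasterSource(object):
    def fetch_raster(self, projection, extent, target_resolution):
        import numpy as np
        width, height = (int(round(v)) for v in target_resolution)
        img = np.random.rand(height, width)
        # one located image covering the whole requested extent
        return [(img, tuple(extent))]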
| lgpl-3.0 |
belkinsky/SFXbot | src/pyAudioAnalysis/analyzeMovieSound.py | 1 | 6130 | import os, sys, shutil, glob, numpy, csv, pickle
import scipy.io.wavfile as wavfile
from . import audioBasicIO
from . import audioTrainTest as aT
from . import audioSegmentation as aS
import matplotlib.pyplot as plt
import scipy.spatial.distance
minDuration = 7;
def classifyFolderWrapper(inputFolder, modelType, modelName, outputMode=False):
if not os.path.isfile(modelName):
raise Exception("Input modelName not found!")
if modelType=='svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
elif modelType=='knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
PsAll = numpy.zeros((len(classNames), ))
files = "*.wav"
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
if len(wavFilesList)==0:
print("No WAV files found!")
return
Results = []
for wavFile in wavFilesList:
[Fs, x] = audioBasicIO.readAudioFile(wavFile)
signalLength = x.shape[0] / float(Fs)
[Result, P, classNames] = aT.fileClassification(wavFile, modelName, modelType)
PsAll += (numpy.array(P) * signalLength)
Result = int(Result)
Results.append(Result)
if outputMode:
print("{0:s}\t{1:s}".format(wavFile,classNames[Result]))
Results = numpy.array(Results)
# print distribution of classes:
[Histogram, _] = numpy.histogram(Results, bins=numpy.arange(len(classNames)+1))
if outputMode:
for i,h in enumerate(Histogram):
print("{0:20s}\t\t{1:d}".format(classNames[i], h))
PsAll = PsAll / numpy.sum(PsAll)
if outputMode:
fig = plt.figure()
ax = fig.add_subplot(111)
plt.title("Classes percentage " + inputFolder.replace('Segments',''))
ax.axis((0, len(classNames)+1, 0, 1))
ax.set_xticks(numpy.array(list(range(len(classNames)+1))))
ax.set_xticklabels([" "] + classNames)
ax.bar(numpy.array(list(range(len(classNames))))+0.5, PsAll)
plt.show()
return classNames, PsAll
def getMusicSegmentsFromFile(inputFile):
modelType = "svm"
modelName = "data/svmMovies8classes"
dirOutput = inputFile[0:-4] + "_musicSegments"
if os.path.exists(dirOutput) and dirOutput!=".":
shutil.rmtree(dirOutput)
os.makedirs(dirOutput)
[Fs, x] = audioBasicIO.readAudioFile(inputFile)
if modelType=='svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
elif modelType=='knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
flagsInd, classNames, acc, CM = aS.mtFileClassification(inputFile, modelName, modelType, plotResults = False, gtFile = "")
segs, classes = aS.flags2segs(flagsInd, mtStep)
for i, s in enumerate(segs):
if (classNames[int(classes[i])] == "Music") and (s[1] - s[0] >= minDuration):
strOut = "{0:s}{1:.3f}-{2:.3f}.wav".format(dirOutput+os.sep, s[0], s[1])
wavfile.write( strOut, Fs, x[int(Fs*s[0]):int(Fs*s[1])])
def analyzeDir(dirPath):
for i,f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file
getMusicSegmentsFromFile(f)
[c, P]= classifyFolderWrapper(f[0:-4] + "_musicSegments", "svm", "data/svmMusicGenre8", False)
if i==0:
print("".ljust(100)+"\t", end=' ')
for C in c:
print(C.ljust(12)+"\t", end=' ')
print()
print(f.ljust(100)+"\t", end=' ')
for p in P:
print("{0:.2f}".format(p).ljust(12)+"\t", end=' ')
print()
def main(argv):
if argv[1]=="--file":
getMusicSegmentsFromFile(argv[2])
classifyFolderWrapper(argv[2][0:-4] + "_musicSegments", "svm", "data/svmMusicGenre8", True)
elif argv[1]=="--dir":
analyzeDir(argv[2])
elif argv[1]=="--sim":
csvFile = argv[2]
f = []
fileNames = []
with open(csvFile, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter='\t', quotechar='|')
for j,row in enumerate(spamreader):
if j>0:
ftemp = []
for i in range(1,9):
ftemp.append(float(row[i]))
f.append(ftemp)
R = row[0]
II = R.find(".wav");
fileNames.append(row[0][0:II])
f = numpy.array(f)
Sim = numpy.zeros((f.shape[0], f.shape[0]))
for i in range(f.shape[0]):
for j in range(f.shape[0]):
Sim[i,j] = scipy.spatial.distance.cdist(numpy.reshape(f[i,:], (f.shape[1],1)).T, numpy.reshape(f[j,:], (f.shape[1],1)).T, 'cosine')
Sim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))
plt.hist(Sim1)
plt.show()
fo = open(csvFile + "_simMatrix", "wb")
pickle.dump(fileNames, fo, protocol = pickle.HIGHEST_PROTOCOL)
pickle.dump(f, fo, protocol = pickle.HIGHEST_PROTOCOL)
pickle.dump(Sim, fo, protocol = pickle.HIGHEST_PROTOCOL)
fo.close()
elif argv[1]=="--loadsim":
try:
fo = open(argv[2], "rb")
except IOError:
print("didn't find file")
return
try:
fileNames = pickle.load(fo)
f = pickle.load(fo)
Sim = pickle.load(fo)
except:
fo.close()
fo.close()
print(fileNames)
Sim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))
plt.hist(Sim1)
plt.show()
elif argv[1]=="--audio-event-dir":
files = "*.wav"
inputFolder = argv[2]
if os.path.isdir(inputFolder):
strFilePattern = os.path.join(inputFolder, files)
else:
strFilePattern = inputFolder + files
wavFilesList = []
wavFilesList.extend(glob.glob(strFilePattern))
wavFilesList = sorted(wavFilesList)
for i,w in enumerate(wavFilesList):
[flagsInd, classesAll, acc, CM] = aS.mtFileClassification(w, "data/svmMovies8classes", "svm", False, '')
histTemp = numpy.zeros( (len(classesAll), ) )
for f in flagsInd:
histTemp[int(f)] += 1.0
histTemp /= histTemp.sum()
if i==0:
print("".ljust(100)+"\t", end=' ')
for C in classesAll:
print(C.ljust(12)+"\t", end=' ')
print()
print(w.ljust(100)+"\t", end=' ')
for h in histTemp:
print("{0:.2f}".format(h).ljust(12)+"\t", end=' ')
print()
return 0
if __name__ == '__main__':
main(sys.argv)
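# Illustrative command lines (not part of the original script) for the modes
# dispatched in main() above; the paths are placeholders.
#
#   python analyzeMovieSound.py --file movie_audio.wav
#       extract the music segments of a WAV file, then classify each
#       segment by musical genre
#   python analyzeMovieSound.py --dir /path/to/wavs/
#       run the same segment-and-classify pipeline on every WAV file
#       in a folder
#   python analyzeMovieSound.py --sim features.csv
#       build a cosine-distance similarity matrix from a tab-separated
#       CSV of per-file features and pickle it next to the CSV
#   python analyzeMovieSound.py --loadsim features.csv_simMatrix
#       reload a pickled similarity matrix and plot the histogram of
#       its entries
#   python analyzeMovieSound.py --audio-event-dir /path/to/wavs/
#       print, per WAV file, the histogram of detected audio-event classes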
| mit |
mlovci/mpld3 | examples/interactive_legend.py | 20 | 1601 | """
Interactive legend plugin
=========================
This is a demonstration of how to add an interactive legend to data plots.
The plugin is defined within mpld3. The user specifies how selecting,
unselecting, or hovering over a legend entry
affects the alpha parameter of the associated objects.
You can also control how to initialize the graph: all selected or unselected.
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import mpld3
from mpld3 import plugins
np.random.seed(9615)
# generate df
N = 100
df = pd.DataFrame((.1 * (np.random.random((N, 5)) - .5)).cumsum(0),
columns=['a', 'b', 'c', 'd', 'e'],)
# plot line + confidence interval
fig, ax = plt.subplots()
ax.grid(True, alpha=0.3)
for key, val in df.iteritems():
l, = ax.plot(val.index, val.values, label=key)
ax.fill_between(val.index,
val.values * .5, val.values * 1.5,
color=l.get_color(), alpha=.4)
# define interactive legend
handles, labels = ax.get_legend_handles_labels() # return lines and labels
interactive_legend = plugins.InteractiveLegendPlugin(zip(handles,
ax.collections),
labels,
alpha_unsel=0.5,
alpha_over=1.5,
start_visible=True)
plugins.connect(fig, interactive_legend)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('Interactive legend', size=20)
mpld3.show()
| bsd-3-clause |
ctk3b/mdtraj | mdtraj/testing/testing.py | 5 | 8311 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Kyle A Beauchamp
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# imports
##############################################################################
from __future__ import print_function, division
import ast
import os
import sys
import functools
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal, assert_array_less, assert_array_max_ulp, assert_equal,
assert_raises, assert_string_equal, assert_warns)
from numpy.testing.decorators import skipif, slow
from nose.tools import ok_, eq_, raises
from nose import SkipTest
from pkg_resources import resource_filename
# py2/3 compatibility
from mdtraj.utils.six import iteritems, integer_types, PY2
# if the system doesn't have scipy, we'd like
# this package to still work:
# we'll just redefine isspmatrix as a function that always returns
# false
try:
from scipy.sparse import isspmatrix
except ImportError:
isspmatrix = lambda x: False
try:
# need special logic to check for equality of pandas DataFrames.
# but this is only relevant if the user has pandas installed
import pandas as pd
except ImportError:
pass
__all__ = ['assert_allclose', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_almost_equal', 'assert_array_almost_equal_nulp',
'assert_array_equal', 'assert_array_less', 'assert_array_max_ulp',
'assert_equal', 'assert_raises',
'assert_string_equal', 'assert_warns', 'get_fn', 'eq',
'assert_dict_equal', 'assert_sparse_matrix_equal',
'expected_failure', 'SkipTest', 'ok_', 'eq_', 'raises', 'skipif',
'slow']
##############################################################################
# functions
##############################################################################
def get_fn(name):
"""Get the full path to one of the reference files shipped for testing
In the source distribution, these files are in ``MDTraj/testing/reference``,
but on installation, they're moved to somewhere in the user's python
site-packages directory.
Parameters
----------
name : str
Name of the file to load (with respect to the reference/ folder).
Examples
--------
>>> import mdtraj as md
>>> t = md.load(get_fn('2EQQ.pdb'))
>>> eq(t.n_frames, 20) # this runs the assert, using the eq() func.
"""
fn = resource_filename('mdtraj', os.path.join('testing', 'reference', name))
if not os.path.exists(fn):
raise ValueError('Sorry! %s does not exist. If you just '
'added it, you\'ll have to reinstall' % fn)
return fn
def eq(o1, o2, decimal=6, err_msg=''):
"""Convenience function for asserting that two objects are equal to one another
If the objects are both arrays or sparse matrices, this method will
dispatch to an appropriate handler, which makes it a little bit more
useful than just calling ``assert o1 == o2`` (which won't work for numpy
arrays -- it returns an array of bools, not a single True or False)
Parameters
----------
o1 : object
The first object
o2 : object
The second object
decimal : int
If the two objects are floats or arrays of floats, they'll be checked for
equality up to this decimal place.
err_msg : str
Custom error message
Returns
-------
passed : bool
True if the test passes. If the test does not pass, an AssertionError is raised instead.
Raises
------
AssertionError
If the tests fail
"""
if isinstance(o1, integer_types) and isinstance(o2, integer_types) and PY2:
eq_(long(o1), long(o2))
return
assert (type(o1) is type(o2)), 'o1 and o2 not the same type: %s %s' % (type(o1), type(o2))
if isinstance(o1, dict):
assert_dict_equal(o1, o2, decimal)
elif isinstance(o1, float):
np.testing.assert_almost_equal(o1, o2, decimal)
elif isspmatrix(o1):
assert_sparse_matrix_equal(o1, o2, decimal)
elif isinstance(o1, np.ndarray):
if o1.dtype.kind == 'f' or o2.dtype.kind == 'f':
# compare floats for almost equality
assert_array_almost_equal(o1, o2, decimal, err_msg=err_msg)
elif o1.dtype.type == np.core.records.record:
# if it's a record array, we need to compare each term
assert o1.dtype.names == o2.dtype.names
for name in o1.dtype.names:
eq(o1[name], o2[name], decimal=decimal, err_msg=err_msg)
else:
# compare everything else (ints, bools) for absolute equality
assert_array_equal(o1, o2, err_msg=err_msg)
elif 'pandas' in sys.modules and isinstance(o1, pd.DataFrame):
# pandas dataframes are basically like dictionaries of numpy arrays
assert_dict_equal(o1, o2, decimal=decimal)
elif isinstance(o1, ast.AST) and isinstance(o2, ast.AST):
eq_(ast.dump(o1), ast.dump(o2))
# probably these are other specialized types
# that need a special check?
else:
eq_(o1, o2, msg=err_msg)
return True
def assert_dict_equal(t1, t2, decimal=6):
"""Assert two dicts are equal. This method should actually
work for any dict of numpy arrays/objects
Parameters
----------
t1 : object
t2 : object
decimal : int
Number of decimal places to check, for arrays inside the dicts
"""
# make sure the keys are the same
eq_(list(t1.keys()), list(t2.keys()))
for key, val in iteritems(t1):
# compare numpy arrays using numpy.testing
if isinstance(val, np.ndarray) or ('pandas' in sys.modules and isinstance(t1, pd.DataFrame)):
if val.dtype.kind == 'f':
# compare floats for almost equality
assert_array_almost_equal(val, t2[key], decimal)
else:
# compare everything else (ints, bools) for absolute equality
assert_array_equal(val, t2[key])
else:
eq_(val, t2[key])
def assert_sparse_matrix_equal(m1, m2, decimal=6):
"""Assert two scipy.sparse matrices are equal.
Parameters
----------
m1 : sparse_matrix
m2 : sparse_matrix
decimal : int
Number of decimal places to check.
"""
# both are sparse matrices
assert isspmatrix(m1)
assert isspmatrix(m2)
# make sure they have the same format
eq_(m1.format, m2.format)
# even though its called assert_array_almost_equal, it will
# work for scalars
assert_array_almost_equal((m1 - m2).sum(), 0, decimal=decimal)
# decorator to mark tests as expected failure
def expected_failure(test):
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except BaseException:
raise SkipTest
else:
raise AssertionError('Failure expected')
return inner
# decorator to skip tests
def skip(reason):
def wrap(test):
@functools.wraps(test)
def inner(*args, **kwargs):
raise SkipTest
print("After f(*args)")
return inner
return wrap
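# A minimal sketch (not used by the module) of eq(): floats and float arrays
# are compared to `decimal` places, dicts are compared key by key, and
# everything else must match exactly.
def _sketch_eq():
    import numpy as np
    assert eq(np.array([1.0, 2.0]), np.array([1.0, 2.0 + 1e-9]))
    assert eq({'a': np.arange(3), 'b': 1.5}, {'a': np.arange(3), 'b': 1.5})
    assert eq(np.float64(3.14159265), np.float64(3.1415927), decimal=6)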
| lgpl-2.1 |
nesterione/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 35 | 15016 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
liikGit/MissionPlanner | Lib/site-packages/scipy/stats/distributions.py | 53 | 207806 | # Functions to implement several important functions for
# various Continuous and Discrete Probability Distributions
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import math
import warnings
from copy import copy
from scipy.misc import comb, derivative
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import gammaln as gamln
import inspect
from numpy import alltrue, where, arange, putmask, \
ravel, take, ones, sum, shape, product, repeat, reshape, \
zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \
arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1
from numpy import atleast_1d, polyval, ceil, place, extract, \
any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \
power, NINF, empty
import numpy
import numpy as np
import numpy.random as mtrand
from numpy import flatnonzero as nonzero
import vonmises_cython
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n==0):
return 1.0
elif (n==1):
if mu is None:
val = moment_func(1,*args)
else:
val = mu
elif (n==2):
if mu2 is None or mu is None:
val = moment_func(2,*args)
else:
val = mu2 + mu*mu
elif (n==3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3,*args)
else:
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment
elif (n==4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4,*args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4
else:
val = moment_func(n, *args)
return val
def _skew(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / m2**1.5
def _kurtosis(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
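# A quick numerical sanity check (not used elsewhere in the module) of the
# helper moments above: for a large standard-normal sample, _moment(data, 2)
# should be close to 1, while _skew and the excess kurtosis from _kurtosis
# should both be close to 0.
def _sketch_sample_moments():
    data = mtrand.standard_normal(100000)
    print(round(_moment(data, 2), 2))   # ~1.0
    print(round(_skew(data), 2))        # ~0.0
    print(round(_kurtosis(data), 2))    # ~0.0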
__all__ = [
'rv_continuous',
'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invnorm', 'invgauss', 'invweibull',
'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'entropy', 'rv_discrete',
'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser',
'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace',
'skellam'
]
floatinfo = numpy.finfo(float)
errp = special.errprint
arr = asarray
gam = special.gamma
import types
from scipy.misc import doccer
all = alltrue
sgf = vectorize
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings.
docheaders = {'methods':"""\nMethods\n-------\n""",
'parameters':"""\nParameters\n----------\n""",
'notes':"""\nNotes\n-----\n""",
'examples':"""\nExamples\n--------\n"""}
_doc_rvs = \
"""rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = \
"""pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = \
"""logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = \
"""pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = \
"""logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = \
"""cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = \
"""logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = \
"""sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = \
"""logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = \
"""ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = \
"""isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = \
"""moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = \
"""stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = \
"""entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = \
"""fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = \
"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = \
"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = \
"""median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = \
"""mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = \
"""var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = \
"""std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = \
"""interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = \
"""
Parameters
----------
x : array-like
quantiles
q : array-like
lower or upper tail probability
%(shapes)s : array-like
shape parameters
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = \
"""Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = \
"""Examples
--------
>>> import matplotlib.pyplot as plt
>>> numargs = %(name)s.numargs
>>> [ %(shapes)s ] = [0.9,] * numargs
>>> rv = %(name)s(%(shapes)s)
Display frozen pdf
>>> x = np.linspace(0, np.minimum(rv.dist.b, 3))
>>> h = plt.plot(x, rv.pdf(x))
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {'rvs':_doc_rvs,
'pdf':_doc_pdf,
'logpdf':_doc_logpdf,
'cdf':_doc_cdf,
'logcdf':_doc_logcdf,
'sf':_doc_sf,
'logsf':_doc_logsf,
'ppf':_doc_ppf,
'isf':_doc_isf,
'stats':_doc_stats,
'entropy':_doc_entropy,
'fit':_doc_fit,
'moment':_doc_moment,
'expect':_doc_expect,
'interval':_doc_interval,
'mean':_doc_mean,
'std':_doc_std,
'var':_doc_var,
'median':_doc_median,
'allmethods':_doc_allmethods,
'callparams':_doc_default_callparams,
'longsummary':_doc_default_longsummary,
'frozennote':_doc_default_frozen_note,
'example':_doc_default_example,
'default':_doc_default,
'before_notes':_doc_default_before_notes}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'fit', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in
_doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\
'Continuous', 'Discrete')
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
docdict_discrete['example'] = _doc_default_example.replace('[0.9,]',
'Replace with reasonable value')
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
    # in Python 3 the list-comprehension variable does not leak into this scope
pass
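## Illustrative sketch (comments only, not executed): how the docstring
## fragments collected above get their %(...)s placeholders filled in.  The
## real substitution happens later, in rv_continuous._construct_doc, via
## doccer.docformat; the values 'gamma' and 'a' below are just hypothetical
## stand-ins for %(name)s and %(shapes)s.
##
## >>> tempdict = docdict.copy()
## >>> tempdict['name'], tempdict['shapes'] = 'gamma', 'a'
## >>> docdict['ppf'] % tempdict          # -> "ppf(q, a, loc=0, scale=1) ..."
## >>> docdict['frozennote'] % tempdict   # mentions rv = gamma(a, loc=0, scale=1)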
def _build_random_array(fun, args, size=None):
# Build an array by applying function fun to
# the arguments in args, creating an array with
# the specified shape.
# Allows an integer shape n as a shorthand for (n,).
if isinstance(size, types.IntType):
size = [size]
if size is not None and len(size) != 0:
n = numpy.multiply.reduce(size)
s = apply(fun, args + (n,))
s.shape = size
return s
else:
n = 1
s = apply(fun, args + (n,))
return s[0]
random = mtrand.random_sample
rand = mtrand.rand
random_integers = mtrand.random_integers
permutation = mtrand.permutation
## Internal class to compute a ppf given a distribution.
## (needs cdf function) and uses brentq from scipy.optimize
## to compute ppf from cdf.
class general_cont_ppf(object):
def __init__(self, dist, xa=-10.0, xb=10.0, xtol=1e-14):
self.dist = dist
self.cdf = eval('%scdf'%dist)
self.xa = xa
self.xb = xb
self.xtol = xtol
self.vecfunc = sgf(self._single_call,otypes='d')
def _tosolve(self, x, q, *args):
return apply(self.cdf, (x, )+args) - q
def _single_call(self, q, *args):
return optimize.brentq(self._tosolve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
def __call__(self, q, *args):
return self.vecfunc(q, *args)
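## A minimal standalone sketch of the same root-finding idea used by
## general_cont_ppf above: a ppf value is the root of cdf(x) - q = 0,
## bracketed by [xa, xb] and solved with brentq.  The standard normal cdf
## (special.ndtr) is used here purely as an example.
##
## >>> from scipy import optimize, special
## >>> q = 0.975
## >>> optimize.brentq(lambda x: special.ndtr(x) - q, -10.0, 10.0)   # ~1.96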
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
self.dist = dist
def pdf(self, x): #raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size':size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments':moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self,k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self,k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
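## Usage sketch for rv_frozen (doctest-style; `norm` is defined further down
## in this module).  Calling a distribution instance freezes the shape,
## location and scale, so the wrapper methods above can be called without
## repeating them.
##
## >>> rv = norm(loc=2.0, scale=3.0)   # equivalent to rv_frozen(norm, loc=2.0, scale=3.0)
## >>> rv.mean(), rv.std()
## (2.0, 3.0)
## >>> rv.cdf(2.0)                     # the frozen RV's median
## 0.5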
## NANs are returned for unsupported parameters.
## location and scale parameters are optional for each distribution.
## The shape parameters are generally required
##
## The loc and scale parameters must be given as keyword parameters.
## These are related to the common symbols in the .lyx file
## skew is third central moment / variance**(1.5)
## kurtosis is fourth central moment / variance**2 - 3
## References::
## Documentation for ranlib, rv2, cdflib and
##
##  Eric Weisstein's World of Mathematics http://mathworld.wolfram.com/
## http://mathworld.wolfram.com/topics/StatisticalDistributions.html
##
## Documentation to Regress+ by Michael McLaughlin
##
## Engineering and Statistics Handbook (NIST)
## http://www.itl.nist.gov/div898/handbook/index.htm
##
## Documentation for DATAPLOT from NIST
## http://www.itl.nist.gov/div898/software/dataplot/distribu.htm
##
## Norman Johnson, Samuel Kotz, and N. Balakrishnan "Continuous
## Univariate Distributions", second edition,
## Volumes I and II, Wiley & Sons, 1994.
## Each continuous random variable has the following methods
##
## rvs -- Random Variates (alternatively calling the class could produce these)
## pdf -- PDF
## logpdf -- log PDF (more numerically accurate if possible)
## cdf -- CDF
## logcdf -- log of CDF
## sf -- Survival Function (1-CDF)
## logsf --- log of SF
## ppf -- Percent Point Function (Inverse of CDF)
## isf -- Inverse Survival Function (Inverse of SF)
## stats -- Return mean, variance, (Fisher's) skew, or (Fisher's) kurtosis
## nnlf -- negative log likelihood function (to minimize)
## fit -- Model-fitting
##
## Maybe Later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
## To define a new random variable you subclass the rv_continuous class
## and re-define the
##
## _pdf method which will be given clean arguments (in between a and b)
## and passing the argument check method
##
## If positive argument checking is not correct for your RV
## then you will also need to re-define
## _argcheck
## Correct, but potentially slow defaults exist for the remaining
## methods but for speed and/or accuracy you can over-ride
##
## _cdf, _ppf, _rvs, _isf, _sf
##
## Rarely would you override _isf and _sf but you could for numerical precision.
##
## Statistics are computed using numerical integration by default.
## For speed you can redefine this using
##
## _stats --- take shape parameters and return mu, mu2, g1, g2
## --- If you can't compute one of these return it as None
##
## --- Can also be defined with a keyword argument moments=<str>
## where <str> is a string composed of 'm', 'v', 's',
## and/or 'k'. Only the components appearing in string
## should be computed and returned in the order 'm', 'v',
## 's', or 'k' with missing values returned as None
##
## OR
##
## You can override
##
## _munp -- takes n and shape parameters and returns
##              -- the nth non-central moment of the distribution.
##
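## A minimal subclassing sketch following the recipe above (kept as a
## comment because rv_continuous is only defined further down in this file).
## The name expon2_gen is hypothetical; it overrides only _pdf, so cdf, ppf,
## rvs, stats, etc. fall back to the slower generic implementations.
##
## class expon2_gen(rv_continuous):
##     "A re-derivation of the exponential distribution, for illustration."
##     def _pdf(self, x):
##         return exp(-x)
## expon2 = expon2_gen(a=0.0, name='expon2')
##
## >>> expon2.cdf(1.0)    # generic integration of _pdf, ~0.6321
## >>> expon2.ppf(0.5)    # generic brentq inversion of the cdf, ~0.6931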
def valarray(shape,value=nan,typecode=None):
"""Return an array of all value.
"""
out = reshape(repeat([value],product(shape,axis=0),axis=0),shape)
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = arr(out)
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4,5))
>>> B = 2
>>> C = rand((1,5))
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
"""
newargs = atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs,]
expand_arr = (cond==cond)
return [extract(cond, arr1 * expand_arr) for arr1 in newargs]
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def _fix_loc_scale(self, args, loc, scale=1):
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
args = args[:self.numargs]
if scale is None:
scale = 1.0
if loc is None:
loc = 0.0
return args, loc, scale
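    ## Consequence of the parsing above (illustrative sketch): for a
    ## distribution with one shape parameter, e.g. gamma (numargs == 1),
    ## trailing positional arguments are taken to be loc and scale, so the
    ## two calls below are equivalent (`x` is a placeholder array).
    ##
    ## >>> gamma.pdf(x, 2.0, 1.0, 3.0)              # a=2, loc=1, scale=3
    ## >>> gamma.pdf(x, 2.0, loc=1.0, scale=3.0)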
def _fix_loc(self, args, loc):
args, loc, scale = self._fix_loc_scale(args, loc)
return args, loc
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self,*args,**kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array-like
random variates of given `size`
"""
kwd_names = ['loc', 'scale', 'size', 'discrete']
loc, scale, size, discrete = map(kwds.get, kwd_names,
[None]*len(kwd_names))
args, loc, scale = self._fix_loc_scale(args, loc, scale)
cond = logical_and(self._argcheck(*args),(scale >= 0))
if not all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = numpy.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if numpy.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
median : float
the median of the distribution.
See Also
--------
self.ppf --- inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median
Parameters
----------
alpha : array-like float in [0,1]
Probability that an rv will be drawn from the returned range
arg1, arg2, ... : array-like
The shape parameter(s) for the distribution (see docstring of the instance
object for more information)
        loc : array-like, optional
            location parameter (default=0)
        scale : array-like, optional
            scale parameter (default=1)
Returns
-------
a, b: array-like (float)
            end-points of the range that contains fraction alpha of the rvs
"""
alpha = arr(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
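    ## Sketch of the identity implemented above: interval(alpha) is simply
    ## (ppf((1-alpha)/2), ppf((1+alpha)/2)), the central interval carrying
    ## probability mass alpha.  For the standard normal (defined later in
    ## this module):
    ##
    ## >>> norm.interval(0.95)    # approximately (-1.96, 1.96)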
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xa : float, optional
Lower bound for fixed point calculation for generic ppf.
xb : float, optional
Upper bound for fixed point calculation for generic ppf.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
        The value in the result arrays that indicates an argument for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
        cumulative distribution function
logcdf(x, <shape(s)>, loc=0, scale=1)
        log of the cumulative distribution function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
        Interval that contains a random realization of this distribution
        with probability `alpha`.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array-like
quantiles
q : array-like
lower or upper tail probability
<shape(s)> : array-like
shape parameters
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
        shape of random variates (default computed from input arguments)
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.
Notes
-----
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the
_pdf or the _cdf method (normalized to location 0 and scale 1)
which will be given clean arguments (in between a and b) and
passing the argument check method
    If positive argument checking is not correct for your RV
then you will also need to re-define ::
_argcheck
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride ::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override _isf, _sf, and _logsf but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using
_stats
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument moments=<str>
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None
OR
You can override
_munp
takes n and shape parameters and returns
the nth non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
            def _pdf(self, x):
                return exp(-x**2 / 2.) / sqrt(2. * pi)
...
"""
def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0,
xtol=1e-14, badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
rv_generic.__init__(self)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xa = xa
self.xb = xb
self.xtol = xtol
self._size = 1
self.m = 0.0
self.moment_type = momtype
self.expandarr = 1
if not hasattr(self,'numargs'):
#allows more general subclassing with *args
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pdf_signature = inspect.getargspec(self._pdf.im_func)
numargs2 = len(pdf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction
self.vecfunc = sgf(self._ppf_single_call,otypes='d')
self.vecfunc.nin = self.numargs + 1
self.vecentropy = sgf(self._entropy,otypes='d')
self.vecentropy.nin = self.numargs + 1
self.veccdf = sgf(self._cdf_single_call,otypes='d')
self.veccdf.nin = self.numargs + 1
self.shapes = shapes
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = sgf(self._mom0_sc,otypes='d')
else:
self.generic_moment = sgf(self._mom1_sc,otypes='d')
self.generic_moment.nin = self.numargs+1 # Because of the *args argument
# of _mom0_sc, vectorize cannot count the number of arguments correctly.
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
# generate docstring for subclass instances
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array-like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _ppf_to_solve(self, x, q,*args):
return apply(self.cdf, (x, )+args)-q
def _ppf_single_call(self, q, *args):
return optimize.brentq(self._ppf_to_solve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x,m,*args):
return x**m * self.pdf(x,*args)
def _mom0_sc(self, m,*args):
return integrate.quad(self._mom_integ0, self.a,
self.b, args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q,m,*args):
return (self.ppf(q,*args))**m
def _mom1_sc(self, m,*args):
return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0]
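    ## The two generic moment rules above are the same integral written two
    ## ways (substituting q = cdf(x)):
    ##
    ##   E[X**m] = integral_a^b x**m * pdf(x) dx     (_mom0_sc, momtype=0)
    ##           = integral_0^1 ppf(q)**m dq         (_mom1_sc, momtype=1, the default)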
## These are the methods you must define (standard form functions)
def _argcheck(self, *args):
# Default check for correct values on args and keywords.
# Returns condition array of 1's where arguments are correct and
# 0's where they are not.
cond = 1
for arg in args:
cond = logical_and(cond,(arr(arg) > 0))
return cond
def _pdf(self,x,*args):
return derivative(self._cdf,x,dx=1e-5,args=args,order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U,*args)
return Y
def _cdf_single_call(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self.veccdf(x,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self.vecfunc(q,*args)
def _isf(self, q, *args):
return self._ppf(1.0-q,*args) #use correct _ppf for subclasses
    # The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self,*args, **kwds):
return None, None, None, None
    # Non-central moments
def _munp(self,n,*args):
return self.generic_moment(n,*args)
def pdf(self,x,*args,**kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
pdf : array-like
Probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logpdf : array-like
Log of the probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self,x,*args,**kwds):
"""
Cumulative distribution function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
cdf : array-like
Cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,1.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self,x,*args,**kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logcdf : array-like
Log of the cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,0.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,x,*args,**kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
sf : array-like
Survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,x,*args,**kwds):
"""
Log of the Survival function log(1-cdf) at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logsf : array-like
Log of the survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
output = place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array-like
lower tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
x : array-like
quantile corresponding to the lower tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.a*scale + loc)
output = place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue)
output = place(output,cond2,self.b*scale + loc)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
output = place(output,cond,self._ppf(*goodargs)*scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array-like
upper tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
x : array-like
quantile corresponding to the upper tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.b)
#output = place(output,(1-cond0)*(cond1==cond1), self.badvalue)
output = place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue)
output = place(output,cond2,self.a)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
output = place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf
if output.ndim == 0:
return output[()]
return output
def stats(self,*args,**kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,scale,moments=map(kwds.get,['loc','scale','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
if N == self.numargs + 3 and moments is None:
# loc, scale, and moments
loc, scale, moments = args[-3:]
args = args[:self.numargs]
if scale is None: scale = 1.0
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc,scale = map(arr,(loc,scale))
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (scale > 0) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
out0 = place(out0,cond,mu*scale+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
out0 = place(out0,cond,mu2*scale*scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
out0 = place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
out0 = place(out0,cond,g2)
output.append(out0)
else: #no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds):
"""
n'th order non-central moment of distribution
Parameters
----------
n: int, n>=1
order of moment
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
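    ## Worked instance of the expansion in the comment above, for n = 2 and
    ## X = L + S*Y:
    ##
    ##   E[X**2] = L**2 * (comb(2,0) + comb(2,1)*(S/L)*E[Y] + (S/L)**2*E[Y**2])
    ##           = L**2 + 2*L*S*E[Y] + S**2*E[Y**2]
    ##
    ## which is what the loop accumulates, the k = n term being added
    ## separately as fac**n * val.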
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args),axis=0)
def nnlf(self, theta, x):
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = arr((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N*log(scale)
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args) - 2
fixedn = []
index = range(Nargs) + [-2, -1]
names = ['f%d' % n for n in range(Nargs)] + ['floc', 'fscale']
x0 = args[:]
for n, key in zip(index, names):
if kwds.has_key(key):
fixedn.append(n)
args[n] = kwds[key]
del x0[n]
if len(fixedn) == 0:
func = self.nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError("All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self.nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
"""
Narg = len(args)
if Narg > self.numargs:
raise ValueError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not (kwds.has_key('loc') and
kwds.has_key('scale')):
start = self._fitstart(data) # get distribution specific starting locations
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, (str, unicode)):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func,x0,args=(ravel(data),),disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
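    ## Usage sketch for the fixing keywords documented above (doctest-style;
    ## `gamma` is defined later in this module and `data` is a placeholder
    ## 1-d sample):
    ##
    ## >>> a, loc, scale = gamma.fit(data)                  # fit all three
    ## >>> a, loc, scale = gamma.fit(data, floc=0.0)        # hold loc fixed at 0
    ## >>> a, loc, scale = gamma.fit(data, 2.0, scale=1.5)  # starting guesses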
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments
"""
mu, mu2 = self.stats(*args,**{'moments':'mv'})
muhat = arr(data).mean()
mu2hat = arr(data).var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
return Lhat, Shat
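    ## Why the two estimates above work: with X = loc + scale*Y and Y the
    ## standard form,
    ##
    ##   mean(X) = loc + scale*mu          var(X) = scale**2 * mu2
    ##
    ## so Shat = sqrt(var(X)/mu2) and Lhat = mean(X) - Shat*mu.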
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead. """
return self.fit_loc_scale(data, *args)
def freeze(self,*args,**kwds):
return rv_frozen(self,*args,**kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return val*log(val)
entr = -integrate.quad(integ,self.a,self.b)[0]
if not np.isnan(entr):
return entr
else: # try with different limits if integration problems
low,upp = self.ppf([0.001,0.999],*args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return -integrate.quad(integ,lower,upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
output = zeros(shape(cond0),'d')
output = place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
#I don't know when or why vecentropy got broken when numargs == 0
if self.numargs == 0:
output = place(output,cond0,self._entropy()+log(scale))
else:
output = place(output,cond0,self.vecentropy(*goodargs)+log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
func : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expected value : float
Notes
-----
        This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from integrate.quad.
"""
lockwds = {'loc': loc,
'scale':scale}
if func is None:
def fun(x, *args):
return x*self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x)*self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
return integrate.quad(fun, lb, ub, **kwds)[0] / invfac
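    ## Usage sketch (doctest-style; `norm` is defined below).  Recovering the
    ## second moment of the standard normal by numerical integration:
    ##
    ## >>> norm.expect(lambda x: x**2)         # ~1.0 (the variance)
    ## >>> norm.expect(lambda x: x, loc=2.0)   # ~2.0 (the shifted mean)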
_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1)
_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
def _cdf(self,x,n):
return 1.0-special.smirnov(n,x)
def _ppf(self,q,n):
return special.smirnovi(n,1.0-q)
ksone = ksone_gen(a=0.0,name='ksone', longname="Kolmogorov-Smirnov "\
"A one-sided test statistic.", shapes="n",
extradoc="""
General Kolmogorov-Smirnov one-sided test.
"""
)
class kstwobign_gen(rv_continuous):
def _cdf(self,x):
return 1.0-special.kolmogorov(x)
def _sf(self,x):
return special.kolmogorov(x)
def _ppf(self,q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0,name='kstwobign', longname='Kolmogorov-Smirnov two-sided (for large N)', extradoc="""
Kolmogorov-Smirnov two-sided test for large N
"""
)
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = math.sqrt(2*pi)
_norm_pdf_logC = math.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return log(special.ndtr(x))
def _norm_ppf(q):
return special.ndtri(q)
class norm_gen(rv_continuous):
def _rvs(self):
return mtrand.standard_normal(self._size)
def _pdf(self,x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self,x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_cdf(-x)
def _logsf(self, x):
return _norm_logcdf(-x)
def _ppf(self,q):
return _norm_ppf(q)
def _isf(self,q):
return -_norm_ppf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
norm = norm_gen(name='norm',longname='A normal',extradoc="""
Normal distribution
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
normal.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
""")
## Alpha distribution
##
class alpha_gen(rv_continuous):
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/arr(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0,name='alpha',shapes='a',extradoc="""
Alpha distribution
alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
where Phi(alpha) is the normal CDF, x > 0, and a > 0.
""")
## Anglit distribution
##
class anglit_gen(rv_continuous):
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4,b=pi/4,name='anglit', extradoc="""
Anglit distribution
anglit.pdf(x) = sin(2*x+pi/2) = cos(2*x) for -pi/4 <= x <= pi/4
""")
## Arcsine distribution
##
class arcsine_gen(rv_continuous):
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
#mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0,b=1.0,name='arcsine',extradoc="""
Arcsine distribution
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
""")
## Beta distribution
##
class beta_gen(rv_continuous):
def _rvs(self, a, b):
return mtrand.beta(a,b,self._size)
def _pdf(self, x, a, b):
Px = (1.0-x)**(b-1.0) * x**(a-1.0)
Px /= special.beta(a,b)
return Px
def _logpdf(self, x, a, b):
lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x)
lPx -= log(special.beta(a,b))
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a,b,x)
def _ppf(self, q, a, b):
return special.btdtri(a,b,q)
def _stats(self, a, b):
mn = a *1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a,b))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# special case
data = (ravel(data)-floc)/fscale
xbar = data.mean()
v = data.var(ddof=0)
fac = xbar*(1-xbar)/v - 1
a = xbar * fac
b = (1-xbar) * fac
return a, b, floc, fscale
else: # do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
beta = beta_gen(a=0.0, b=1.0, name='beta',shapes='a, b',extradoc="""
Beta distribution
beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) * (1-x)**(b-1)
for 0 < x < 1, a, b > 0.
""")
## Beta Prime
class betaprime_gen(rv_continuous):
def _rvs(self, a, b):
u1 = gamma.rvs(a,size=self._size)
u2 = gamma.rvs(b,size=self._size)
return (u1 / u2)
def _pdf(self, x, a, b):
return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b)
def _logpdf(self, x, a, b):
return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b))
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x==1.0, 1.0-1e-6,x)
return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \
*(b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b',
extradoc="""
Beta prime distribution
betaprime.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b))
                    * x**(a-1) * (1+x)**(-a-b)
for x > 0, a, b > 0.
""")
## Bradford
##
class bradford_gen(rv_continuous):
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \
+ 6*c*k*k*(3*k-14) + 12*k**3
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford', longname="A Bradford",
shapes='c', extradoc="""
Bradford distribution
bradford.pdf(x,c) = c/(k*(1+c*x))
for 0 < x < 1, c > 0 and k = log(1+c).
""")
## Burr
# burr with d=1 is called the fisk distribution
class burr_gen(rv_continuous):
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
        return (1+x**(-c*1.0))**(-d*1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _stats(self, c, d, moments='mv'):
g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d)
g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d)
gd = gam(d)
k = gd*g2c*g2cd - g1c**2 * g1cd**2
mu = g1c*g1cd / gd
mu2 = k / gd**2.0
g1, g2 = None, None
g3c, g3cd = None, None
if 's' in moments:
g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d)
g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd
g1 /= sqrt(k**3)
if 'k' in moments:
if g3c is None:
g3c = gam(1-3.0/c)
if g3cd is None:
g3cd = gam(3.0/c+d)
g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d)
g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd
g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd
return mu, mu2, g1, g2
burr = burr_gen(a=0.0, name='burr', longname="Burr",
shapes="c, d", extradoc="""
Burr distribution
burr.pdf(x,c,d) = c*d * x**(-c-1) * (1+x**(-c))**(-d-1)
for x > 0.
""")
# Fisk distribution
# burr is a generalization
class fisk_gen(burr_gen):
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _stats(self, c):
return burr_gen._stats(self, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk', longname="Fisk",
shapes='c', extradoc="""
Fisk distribution.
Also known as the log-logistic distribution.
Burr distribution with d=1.
"""
)
## Cauchy
# median = loc
class cauchy_gen(rv_continuous):
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
cauchy = cauchy_gen(name='cauchy',longname='Cauchy',extradoc="""
Cauchy distribution
cauchy.pdf(x) = 1/(pi*(1+x**2))
This is the t distribution with one degree of freedom.
"""
)
## Chi
## (positive square-root of chi-square)
## chi(1, loc, scale) = halfnormal
## chi(2, 0, scale) = Rayleigh
## chi(3, 0, scale) = Maxwell
class chi_gen(rv_continuous):
def _rvs(self, df):
return sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5,q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/arr(mu2**1.5)
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= arr(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0,name='chi',shapes='df',extradoc="""
Chi distribution
chi.pdf(x,df) = x**(df-1)*exp(-x**2/2)/(2**(df/2-1)*gamma(df/2))
for x > 0.
"""
)
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
def _rvs(self, df):
return mtrand.chisquare(df,self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
#term1 = (df/2.-1)*log(x)
#term1[(df==2)*(x==0)] = 0
#avoid 0*log(0)==nan
return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2.
## Px = x**(df/2.0-1)*exp(-x/2.0)
## Px /= special.gamma(df/2.0)* 2**(df/2.0)
## return log(Px)
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0,name='chi2',longname='A chi-squared',shapes='df',
extradoc="""
Chi-squared distribution
chi2.pdf(x,df) = 1/(2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
"""
)
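# Illustrative cross-check: as the comment above notes, chi2 is the gamma
# distribution with shape df/2 and scale 2, so the generic pdfs should agree
# up to rounding, e.g.
#   chi2.pdf(3.0, 4) == gamma.pdf(3.0, 2.0, scale=2.0)
# (gamma is instantiated further down in this module; the call uses the
# generic rv_continuous.pdf interface with its scale keyword.)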
## Cosine (Approximation to the Normal)
class cosine_gen(rv_continuous):
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi,b=pi,name='cosine',extradoc="""
Cosine distribution (approximation to the normal)
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for -pi <= x <= pi.
""")
## Double Gamma distribution
class dgamma_gen(rv_continuous):
def _rvs(self, a):
u = random(size=self._size)
return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1))
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return (a-1.0)*log(ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x>0,0.5+fac,0.5-fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
#return where(x>0,0.5-0.5*fac,0.5+0.5*fac)
return where(x>0,0.5-fac,0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a,1-abs(2*q-1))
return where(q>0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma',longname="A double gamma",
shapes='a',extradoc="""
Double gamma distribution
dgamma.pdf(x,a) = 1/(2*gamma(a))*abs(x)**(a-1)*exp(-abs(x))
for a > 0.
"""
)
## Double Weibull distribution
##
class dweibull_gen(rv_continuous):
def _rvs(self, c):
u = random(size=self._size)
return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1))
def _pdf(self, x, c):
ax = abs(x)
Px = c/2.0*ax**(c-1.0)*exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5*exp(-abs(x)**c)
return where(x > 0, 1-Cx1, Cx1)
def _ppf_skip(self, q, c):
fac = where(q<=0.5,2*q,2*q-1)
fac = pow(arr(log(1.0/fac)),1.0/c)
return where(q>0.5,fac,-fac)
def _stats(self, c):
var = gam(1+2.0/c)
return 0.0, var, 0.0, gam(1+4.0/c)/var
dweibull = dweibull_gen(name='dweibull',longname="A double Weibull",
shapes='c',extradoc="""
Double Weibull distribution
dweibull.pdf(x,c) = c/2*abs(x)**(c-1)*exp(-abs(x)**c)
"""
)
## ERLANG
##
## Special case of the Gamma distribution with shape parameter an integer.
##
class erlang_gen(rv_continuous):
def _rvs(self, n):
return gamma.rvs(n,size=self._size)
# note: the shape-validation hook must be named _argcheck for rv_continuous to call it
def _argcheck(self, n):
return (n > 0) & (floor(n)==n)
def _pdf(self, x, n):
Px = (x)**(n-1.0)*exp(-x)/special.gamma(n)
return Px
def _logpdf(self, x, n):
return (n-1.0)*log(x) - x - gamln(n)
def _cdf(self, x, n):
return special.gdtr(1.0,n,x)
def _sf(self, x, n):
return special.gdtrc(1.0,n,x)
def _ppf(self, q, n):
return special.gdtrix(1.0, n, q)
def _stats(self, n):
n = n*1.0
return n, n, 2/sqrt(n), 6/n
def _entropy(self, n):
return special.psi(n)*(1-n) + 1 + gamln(n)
erlang = erlang_gen(a=0.0,name='erlang',longname='An Erlang',
shapes='n',extradoc="""
Erlang distribution (Gamma with integer shape parameter)
"""
)
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
## scale == 1.0 / lambda
class expon_gen(rv_continuous):
def _rvs(self):
return mtrand.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -expm1(-x)
def _ppf(self, q):
return -log1p(-q)
def _sf(self,x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self,q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0,name='expon',longname="An exponential",
extradoc="""
Exponential distribution
expon.pdf(x) = exp(-x)
for x >= 0.
scale = 1.0 / lambda
"""
)
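# Illustrative usage note: the rate parameterization is obtained through the
# generic scale argument, scale = 1/lambda, e.g. for a hypothetical rate `lam`
#   expon.pdf(x, scale=1.0/lam)   # equals lam*exp(-lam*x) for x >= 0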
## Exponentiated Weibull
class exponweib_gen(rv_continuous):
def _pdf(self, x, a, c):
exc = exp(-x**c)
return a*c*(1-exc)**arr(a-1) * exc * x**(c-1)
def _logpdf(self, x, a, c):
exc = exp(-x**c)
return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
def _cdf(self, x, a, c):
exm1c = -expm1(-x**c)
return arr((exm1c)**a)
def _ppf(self, q, a, c):
return (-log1p(-q**(1.0/a)))**arr(1.0/c)
exponweib = exponweib_gen(a=0.0,name='exponweib',
longname="An exponentiated Weibull",
shapes="a, c",extradoc="""
Exponentiated Weibull distribution
exponweib.pdf(x,a,c) = a*c*(1-exp(-x**c))**(a-1)*exp(-x**c)*x**(c-1)
for x > 0, a, c > 0.
"""
)
## Exponential Power
class exponpow_gen(rv_continuous):
def _pdf(self, x, b):
xbm1 = arr(x**(b-1.0))
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b):
xb = x**(b-1.0)*x
return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
def _cdf(self, x, b):
xb = arr(x**b)
return -expm1(-expm1(xb))
def _sf(self, x, b):
xb = arr(x**b)
return exp(-expm1(xb))
def _isf(self, x, b):
return (log1p(-log(x)))**(1./b)
def _ppf(self, q, b):
return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0,name='exponpow',longname="An exponential power",
shapes='b',extradoc="""
Exponential Power distribution
exponpow.pdf(x,b) = b*x**(b-1) * exp(1+x**b - exp(x**b))
for x >= 0, b > 0.
"""
)
## Fatigue-Life (Birnbaum-Saunders)
class fatiguelife_gen(rv_continuous):
def _rvs(self, c):
z = norm.rvs(size=self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return (x+1)/arr(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/arr((2.0*x*c**2)))
def _logpdf(self, x, c):
return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x))
def _cdf(self, x, c):
return special.ndtr(1.0/c*(sqrt(x)-1.0/arr(sqrt(x))))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25*(tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
c2 = c*c
mu = c2 / 2.0 + 1
den = 5*c2 + 4
mu2 = c2*den /4.0
g1 = 4*c*sqrt(11*c2+6.0)/den**1.5
g2 = 6*c2*(93*c2+41.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0,name='fatiguelife',
longname="A fatigue-life (Birnbaum-Sanders)",
shapes='c',extradoc="""
Fatigue-life (Birnbaum-Sanders) distribution
fatiguelife.pdf(x,c) = (x+1)/(2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for x > 0.
"""
)
## Folded Cauchy
class foldcauchy_gen(rv_continuous):
def _rvs(self, c):
return abs(cauchy.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
# setting xb=1000 allows the ppf to be calculated up to q=0.9993
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy',xb=1000,
longname = "A folded Cauchy",
shapes='c',extradoc="""
A folded Cauchy distribution
foldcauchy.pdf(x,c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for x >= 0.
"""
)
## F
class f_gen(rv_continuous):
def _rvs(self, dfn, dfd):
return mtrand.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# n = arr(1.0*dfn)
# m = arr(1.0*dfd)
# Px = m**(m/2) * n**(n/2) * x**(n/2-1)
# Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2)
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0*dfn
m = 1.0*dfd
lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x)
lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v2 = arr(dfd*1.0)
v1 = arr(dfn*1.0)
mu = where (v2 > 2, v2 / arr(v2 - 2), inf)
mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4))
mu2 = where(v2 > 4, mu2, inf)
g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2)))
g1 = where(v2 > 6, g1, nan)
g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6))
g2 = where(v2 > 8, g2, nan)
return mu, mu2, g1, g2
f = f_gen(a=0.0,name='f',longname='An F',shapes="dfn, dfd",
extradoc="""
F distribution
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x,df1,df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for x > 0.
"""
)
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs give the scale parameter correctly, but the
## first parameter listed there is a shape parameter, A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
def _rvs(self, c):
return abs(norm.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0)
def _cdf(self, x, c,):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
fac = special.erf(c/sqrt(2))
mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
mu2 = c*c + 1 - mu*mu
c2 = c*c
g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \
pi*c*(fac*fac-1))
g1 /= pi*mu2**1.5
g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
g2 /= mu2**2.0
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0,name='foldnorm',longname='A folded normal',
shapes='c',extradoc="""
Folded normal distribution
foldnormal.pdf(x,c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for c >= 0.
"""
)
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
def _pdf(self, x, c):
return c*pow(x,c-1)*exp(-pow(x,c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x,c)
def _cdf(self, x, c):
return -expm1(-pow(x,c))
def _ppf(self, q, c):
return pow(-log1p(-q),1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0,name='frechet_r',longname="A Frechet right",
shapes='c',extradoc="""
A Frechet (right) distribution (also called Weibull minimum)
frechet_r.pdf(x,c) = c*x**(c-1)*exp(-x**c)
for x > 0, c > 0.
"""
)
weibull_min = frechet_r_gen(a=0.0,name='weibull_min',
longname="A Weibull minimum",
shapes='c',extradoc="""
A Weibull minimum distribution (also called a Frechet (right) distribution)
weibull_min.pdf(x,c) = c*x**(c-1)*exp(-x**c)
for x > 0, c > 0.
"""
)
class frechet_l_gen(rv_continuous):
def _pdf(self, x, c):
return c*pow(-x,c-1)*exp(-pow(-x,c))
def _cdf(self, x, c):
return exp(-pow(-x,c))
def _ppf(self, q, c):
return -pow(-log(q),1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2): sgn = -1
else: sgn = 1
return sgn*val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0,name='frechet_l',longname="A Frechet left",
shapes='c',extradoc="""
A Frechet (left) distribution (also called Weibull maximum)
frechet_l.pdf(x,c) = c * (-x)**(c-1) * exp(-(-x)**c)
for x < 0, c > 0.
"""
)
weibull_max = frechet_l_gen(b=0.0,name='weibull_max',
longname="A Weibull maximum",
shapes='c',extradoc="""
A Weibull maximum distribution (also called a Frechet (left) distribution)
weibull_max.pdf(x,c) = c * (-x)**(c-1) * exp(-(-x)**c)
for x < 0, c > 0.
"""
)
## Generalized Logistic
##
class genlogistic_gen(rv_continuous):
def _pdf(self, x, c):
Px = c*exp(-x)/(1+exp(-x))**(c+1.0)
return Px
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q,-1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2,c)
g1 = -2*zeta(3,c) + 2*_ZETA3
g1 /= mu2**1.5
g2 = pi**4/15.0 + 6*zeta(4,c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic',
longname="A generalized logistic",
shapes='c',extradoc="""
Generalized logistic distribution
genlogistic.pdf(x,c) = c*exp(-x) / (1+exp(-x))**(c+1)
for x > 0, c > 0.
"""
)
## Generalized Pareto
class genpareto_gen(rv_continuous):
def _argcheck(self, c):
c = arr(c)
self.b = where(c < 0, 1.0/abs(c), inf)
return where(c==0, 0, 1)
def _pdf(self, x, c):
Px = pow(1+c*x,arr(-1.0-1.0/c))
return Px
def _logpdf(self, x, c):
return (-1.0-1.0/c) * np.log1p(c*x)
def _cdf(self, x, c):
return 1.0 - pow(1+c*x,arr(-1.0/c))
def _ppf(self, q, c):
vals = 1.0/c * (pow(1-q, -c)-1)
return vals
def _munp(self, n, c):
k = arange(0,n+1)
val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0)
return where(c*n < 1, val, inf)
def _entropy(self, c):
if (c > 0):
return 1+c
else:
self.b = -1.0 / c
return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0,name='genpareto',
longname="A generalized Pareto",
shapes='c',extradoc="""
Generalized Pareto distribution
genpareto.pdf(x,c) = (1+c*x)**(-1-1/c)
for c != 0, and for x >= 0 for all c, and x < 1/abs(c) for c < 0.
"""
)
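# Note (illustrative): _argcheck above sets the upper support bound self.b to
# 1/|c| for c < 0 (inf otherwise) and rejects c == 0; the c -> 0 limit of this
# family is the exponential distribution, so use expon for that case.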
## Generalized Exponential
class genexpon_gen(rv_continuous):
def _pdf(self, x, a, b, c):
return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0,name='genexpon',
longname='A generalized exponential',
shapes='a, b, c',extradoc="""
Generalized exponential distribution (Ryu 1993)
f(x,a,b,c) = (a+b*(1-exp(-c*x))) * exp(-a*x-b*x+b/c*(1-exp(-c*x)))
for x >= 0, a,b,c > 0.
a, b, c are the first, second and third shape parameters.
References
----------
"The Exponential Distribution: Theory, Methods and Applications",
N. Balakrishnan, Asit P. Basu
"""
)
## Generalized Extreme Value
## c=0 reduces to the Gumbel distribution.
## This version does accept c==0; the c==0 case is equivalent to gumbel_r.
# new version by Per Brodtkorb, see ticket:767
# also works for c==0, special case is gumbel_r
# increased precision for small c
class genextreme_gen(rv_continuous):
def _argcheck(self, c):
min = np.minimum
max = np.maximum
sml = floatinfo.machar.xmin
#self.b = where(c > 0, 1.0 / c,inf)
#self.a = where(c < 0, 1.0 / c, -inf)
self.b = where(c > 0, 1.0 / max(c, sml),inf)
self.a = where(c < 0, 1.0 / min(c,-sml), -inf)
return where(abs(c)==inf, 0, 1) #True #(c!=0)
def _pdf(self, x, c):
## ex2 = 1-c*x
## pex2 = pow(ex2,1.0/c)
## p2 = exp(-pex2)*pex2/ex2
## return p2
cx = c*x
logex2 = where((c==0)*(x==x),0.0,log1p(-cx))
logpex2 = where((c==0)*(x==x),-x,logex2/c)
pex2 = exp(logpex2)
# % Handle special cases
logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2)
putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation
return exp(logpdf)
def _cdf(self, x, c):
#return exp(-pow(1-c*x,1.0/c))
loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
#return 1.0/c*(1.-(-log(q))**c)
x = -log(-log(q))
return where((c==0)*(x==x),x,-expm1(-c*x)/c)
def _stats(self,c):
g = lambda n : gam(n*c+1)
g1 = g(1)
g2 = g(2)
g3 = g(3);
g4 = g(4)
g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0)
gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0);
eps = 1e-14
gamk = where(abs(c)<eps,-_EULER,expm1(gamln(c+1))/c)
m = where(c<-1.0,nan,-gamk)
v = where(c<-0.5,nan,g1**2.0*gam2k)
#% skewness
sk1 = where(c<-1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)));
sk = where(abs(c)<=eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1)
#% The kurtosis is:
ku1 = where(c<-1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c)<=(eps)**0.23,12.0/5.0,ku1-3.0)
return m,v,sk,ku
def _munp(self, n, c):
k = arange(0,n+1)
vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme',
longname="A generalized extreme value",
shapes='c',extradoc="""
Generalized extreme value (see gumbel_r for c=0)
genextreme.pdf(x,c) = exp(-exp(-x))*exp(-x) for c==0
genextreme.pdf(x,c) = exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1)
for x <= 1/c, c > 0
"""
)
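# Explanatory note (illustrative): the where((c==0)*(x==x), ..., ...) pattern
# above evaluates the Gumbel limit (c == 0) without dividing by zero while
# keeping array broadcasting; the (x==x) factor only expands the condition to
# the shape of x (and is False at NaNs).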
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
def _rvs(self, a):
return mtrand.standard_gamma(a, self._size)
def _pdf(self, x, a):
return x**(a-1)*exp(-x)/special.gamma(a)
def _logpdf(self, x, a):
return (a-1)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a,q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
def _fitstart(self, data):
a = 4 / _skew(data)**2
return super(gamma_gen, self)._fitstart(data, args=(a,))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc == 0:
xbar = ravel(data).mean()
logx_bar = ravel(log(data)).mean()
s = log(xbar) - logx_bar
def func(a):
return log(a) - special.digamma(a) - s
aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
scale = xbar / a
return a, floc, scale
else:
return super(gamma_gen, self).fit(data, *args, **kwds)
gamma = gamma_gen(a=0.0,name='gamma',longname='A gamma',
shapes='a',extradoc="""
Gamma distribution
For a = integer, this is the Erlang distribution, and for a=1 it is the
exponential distribution.
gamma.pdf(x,a) = x**(a-1)*exp(-x)/gamma(a)
for x >= 0, a > 0.
"""
)
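# Usage sketch (illustrative): with floc=0 the overridden fit() above solves
# log(a) - digamma(a) = log(mean(x)) - mean(log(x)) for the shape a with
# Brent's method and then sets scale = mean(x)/a, e.g.
#   a_hat, loc0, scale_hat = gamma.fit(samples, floc=0)
# where `samples` is any array of positive data (hypothetical name).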
# Generalized Gamma
class gengamma_gen(rv_continuous):
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a))
def _cdf(self, x, a, c):
val = special.gammainc(a,x**c)
cond = c + 0*val
return where(cond>0,val,1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a,q)
val2 = special.gammaincinv(a,1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0,val1**ic,val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a,c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma',
longname='A generalized gamma',
shapes="a, c", extradoc="""
Generalized gamma distribution
gengamma.pdf(x,a,c) = abs(c)*x**(c*a-1)*exp(-x**c)/gamma(a)
for x > 0, a > 0, and c != 0.
"""
)
## Generalized Half-Logistic
##
class genhalflogistic_gen(rv_continuous):
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self,c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic',
longname="A generalized half-logistic",
shapes='c',extradoc="""
Generalized half-logistic
genhalflogistic.pdf(x,c) = 2*(1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for 0 <= x <= 1/c, and c > 0.
"""
)
## Gompertz (Truncated Gumbel)
## Defined for x>=0
class gompertz_gen(rv_continuous):
def _pdf(self, x, c):
ex = exp(x)
return c*ex*exp(-c*(ex-1))
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1,c)
gompertz = gompertz_gen(a=0.0, name='gompertz',
longname="A Gompertz (truncated Gumbel) distribution",
shapes='c',extradoc="""
Gompertz (truncated Gumbel) distribution
gompertz.pdf(x,c) = c*exp(x) * exp(-c*(exp(x)-1))
for x >= 0, c > 0.
"""
)
## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz
## The left-skewed and right-skewed Gumbel distributions are available
## as gumbel_l and gumbel_r, respectively.
class gumbel_r_gen(rv_continuous):
def _pdf(self, x):
ex = exp(-x)
return ex*exp(-ex)
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, \
12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_r = gumbel_r_gen(name='gumbel_r',longname="A (right-skewed) Gumbel",
extradoc="""
Right-skewed Gumbel (Log-Weibull, Fisher-Tippett, Gompertz) distribution
gumbel_r.pdf(x) = exp(-(x+exp(-x)))
"""
)
class gumbel_l_gen(rv_continuous):
def _pdf(self, x):
ex = exp(x)
return ex*exp(-ex)
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_l = gumbel_l_gen(name='gumbel_l',longname="A left-skewed Gumbel",
extradoc="""
Left-skewed Gumbel distribution
gumbel_l.pdf(x) = exp(x - exp(x))
"""
)
# Half-Cauchy
class halfcauchy_gen(rv_continuous):
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - np.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0,name='halfcauchy',
longname="A Half-Cauchy",extradoc="""
Half-Cauchy distribution
halfcauchy.pdf(x) = 2/(pi*(1+x**2))
for x >= 0.
"""
)
## Half-Logistic
##
class halflogistic_gen(rv_continuous):
def _pdf(self, x):
return 0.5/(cosh(x/2.0))**2.0
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n==1: return 2*log(2)
if n==2: return pi*pi/3.0
if n==3: return 9*_ZETA3
if n==4: return 7*pi**4 / 15.0
return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic',
longname="A half-logistic",
extradoc="""
Half-logistic distribution
halflogistic.pdf(x) = 2*exp(-x)/(1+exp(-x))**2 = 1/2*sech(x/2)**2
for x >= 0.
"""
)
## Half-normal = chi(1, loc, scale)
class halfnorm_gen(rv_continuous):
def _rvs(self):
return abs(norm.rvs(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \
8*(pi-3)/(pi-2)**2
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm',
longname="A half-normal",
extradoc="""
Half-normal distribution
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for x > 0.
"""
)
## Hyperbolic Secant
class hypsecant_gen(rv_continuous):
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant',longname="A hyperbolic secant",
extradoc="""
Hyperbolic secant distribution
hypsecant.pdf(x) = 1/pi * sech(x)
"""
)
## Gauss Hypergeometric
class gausshyper_gen(rv_continuous):
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c==c) & (z==z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a,b) / special.beta(a,b)
num = special.hyp2f1(c,a+n,a+b+n,-z)
den = special.hyp2f1(c,a,a+b,-z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper',
longname="A Gauss hypergeometric",
shapes="a, b, c, z",
extradoc="""
Gauss hypergeometric distribution
gausshyper.pdf(x,a,b,c,z) = C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for 0 <= x <= 1, a > 0, b > 0, and
C = 1/(B(a,b)F[2,1](c,a;a+b;-z))
"""
)
## Inverted Gamma
# special case of generalized gamma with c=-1
#
class invgamma_gen(rv_continuous):
def _pdf(self, x, a):
return exp(self._logpdf(x,a))
def _logpdf(self, x, a):
return (-(a+1)*log(x)-gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0-special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0/special.gammaincinv(a,1-q)
def _munp(self, n, a):
return exp(gamln(a-n) - gamln(a))
def _entropy(self, a):
return a - (a+1.0)*special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma',longname="An inverted gamma",
shapes='a',extradoc="""
Inverted gamma distribution
invgamma.pdf(x,a) = x**(-a-1)/gamma(a) * exp(-1/x)
for x > 0, a > 0.
"""
)
## Inverse Normal Distribution
# scale is gamma from DATAPLOT and B from Regress
_invnorm_msg = \
"""The `invnorm` distribution will be renamed to `invgauss` after scipy 0.9"""
class invnorm_gen(rv_continuous):
def _rvs(self, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
fac = sqrt(1.0/x)
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(2.0/mu)*norm.cdf(-fac*(x+mu)/mu)
return C1
def _stats(self, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invnorm = invnorm_gen(a=0.0, name='invnorm', longname="An inverse normal",
shapes="mu",extradoc="""
Inverse normal distribution
NOTE: `invnorm` will be renamed to `invgauss` after scipy 0.9
invnorm.pdf(x,mu) = 1/sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for x > 0.
"""
)
## Inverse Gaussian Distribution (used to be called 'invnorm')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
def _rvs(self, mu):
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(2.0/mu)*norm.cdf(-fac*(x+mu)/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss', longname="An inverse Gaussian",
shapes="mu",extradoc="""
Inverse Gaussian distribution
invgauss.pdf(x,mu) = 1/sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for x > 0.
"""
)
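# Note (illustrative): invgauss supersedes the deprecated invnorm above, and
# the Wald distribution defined near the end of this file is the mu = 1
# special case, i.e. wald.pdf(x) == invgauss.pdf(x, 1.0).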
## Inverted Weibull
class invweibull_gen(rv_continuous):
def _pdf(self, x, c):
xc1 = x**(-c-1.0)
#xc2 = xc1*x
xc2 = x**(-c)
xc2 = exp(-xc2)
return c*xc1*xc2
def _cdf(self, x, c):
xc1 = x**(-c)
return exp(-xc1)
def _ppf(self, q, c):
return pow(-log(q),arr(-1.0/c))
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0,name='invweibull',
longname="An inverted Weibull",
shapes='c',extradoc="""
Inverted Weibull distribution
invweibull.pdf(x,c) = c*x**(-c-1)*exp(-x**(-c))
for x > 0, c > 0.
"""
)
## Johnson SB
class johnsonsb_gen(rv_continuous):
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
trm = norm.pdf(a+b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a)))
johnsonsb = johnsonsb_gen(a=0.0,b=1.0,name='johnsonsb',
longname="A Johnson SB",
shapes="a, b",extradoc="""
Johnson SB distribution
johnsonsb.pdf(x,a,b) = b/(x*(1-x)) * phi(a + b*log(x/(1-x)))
for 0 < x < 1 and a,b > 0, and phi is the normal pdf.
"""
)
## Johnson SU
class johnsonsu_gen(rv_continuous):
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
x2 = x*x
trm = norm.pdf(a+b*log(x+sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x+sqrt(x*x+1)))
def _ppf(self, q, a, b):
return sinh((norm.ppf(q)-a)/b)
johnsonsu = johnsonsu_gen(name='johnsonsu',longname="A Johnson SU",
shapes="a, b", extradoc="""
Johnson SU distribution
johnsonsu.pdf(x,a,b) = b/sqrt(x**2+1) * phi(a + b*log(x+sqrt(x**2+1)))
for all x, a,b > 0, and phi is the normal pdf.
"""
)
## Laplace Distribution
class laplace_gen(rv_continuous):
def _rvs(self):
return mtrand.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace', longname="A Laplace",
extradoc="""
Laplacian distribution
laplace.pdf(x) = 1/2*exp(-abs(x))
"""
)
## Levy Distribution
class levy_gen(rv_continuous):
def _pdf(self, x):
return 1/sqrt(2*pi*x)/x*exp(-1/(2*x))
def _cdf(self, x):
return 2*(1-norm._cdf(1/sqrt(x)))
def _ppf(self, q):
val = norm._ppf(1-q/2.0)
return 1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0,name="levy", longname = "A Levy", extradoc="""
Levy distribution
levy.pdf(x) = 1/(x*sqrt(2*pi*x)) * exp(-1/(2*x))
for x > 0.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
"""
)
## Left-skewed Levy Distribution
class levy_l_gen(rv_continuous):
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2*norm._cdf(1/sqrt(ax))-1
def _ppf(self, q):
val = norm._ppf((q+1.0)/2)
return -1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0,name="levy_l", longname = "A left-skewed Levy", extradoc="""
Left-skewed Levy distribution
levy_l.pdf(x) = 1/(abs(x)*sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for x < 0.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
"""
)
## Levy-stable Distribution (only random variates)
class levy_stable_gen(rv_continuous):
def _rvs(self, alpha, beta):
sz = self._size
TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz)
W = expon.rvs(size=sz)
if alpha==1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
# else
ialpha = 1.0/alpha
aTH = alpha*TH
if beta==0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
# else
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable', longname="A Levy-stable",
shapes="alpha, beta", extradoc="""
Levy-stable distribution (only random variates available -- ignore other docs)
"""
)
## Logistic (special case of generalized logistic with c=1)
## Sech-squared
class logistic_gen(rv_continuous):
def _rvs(self):
return mtrand.logistic(size=self._size)
def _pdf(self, x):
ex = exp(-x)
return ex / (1+ex)**2.0
def _cdf(self, x):
return 1.0/(1+exp(-x))
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
return 1.0
logistic = logistic_gen(name='logistic', longname="A logistic",
extradoc="""
Logistic distribution
logistic.pdf(x) = exp(-x)/(1+exp(-x))**2
"""
)
## Log Gamma
#
class loggamma_gen(rv_continuous):
def _rvs(self, c):
return log(mtrand.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c,q))
def _munp(self,n,*args):
# use generic moment calculation using ppf
return self._mom0_sc(n,*args)
loggamma = loggamma_gen(name='loggamma', longname="A log gamma", shapes='c',
extradoc="""
Log gamma distribution
loggamma.pdf(x,c) = exp(c*x-exp(x)) / gamma(c)
for all x, c > 0.
"""
)
## Log-Laplace (Log Double Exponential)
##
class loglaplace_gen(rv_continuous):
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace',
longname="A log-Laplace",shapes='c',
extradoc="""
Log-Laplace distribution (Log Double Exponential)
loglaplace.pdf(x,c) = c/2*x**(c-1) for 0 < x < 1
= c/2*x**(-c-1) for x >= 1
for c > 0.
"""
)
## Lognormal (Cobb-Douglas)
## The shape parameter is the standard deviation of the underlying
## normal distribution.
## The mean of the underlying normal distribution is log(scale).
class lognorm_gen(rv_continuous):
def _rvs(self, s):
return exp(s * norm.rvs(size=self._size))
def _pdf(self, x, s):
Px = exp(-log(x)**2 / (2*s**2))
return Px / (s*x*sqrt(2*pi))
def _cdf(self, x, s):
return norm.cdf(log(x)/s)
def _ppf(self, q, s):
return exp(s*norm._ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = numpy.polyval([1,2,3,0,-6.0],p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5*(1+log(2*pi)+2*log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm',
longname='A lognormal', shapes='s',
extradoc="""
Lognormal distribution
lognorm.pdf(x,s) = 1/(s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for x > 0, s > 0.
If log x is normally distributed with mean mu and variance sigma**2,
then x is log-normally distributed with shape parameter sigma and scale
parameter exp(mu).
"""
)
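# Parameterization note (illustrative): to model a variable whose logarithm is
# normal with mean mu and standard deviation sigma, use s = sigma and
# scale = exp(mu), e.g. for hypothetical user-chosen mu and sigma
#   lognorm.pdf(x, sigma, scale=np.exp(mu))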
# Gibrat's distribution is just lognormal with s=1
class gilbrat_gen(lognorm_gen):
def _rvs(self):
return lognorm_gen._rvs(self, 1.0)
def _pdf(self, x):
return lognorm_gen._pdf(self, x, 1.0)
def _cdf(self, x):
return lognorm_gen._cdf(self, x, 1.0)
def _ppf(self, q):
return lognorm_gen._ppf(self, q, 1.0)
def _stats(self):
return lognorm_gen._stats(self, 1.0)
def _entropy(self):
return 0.5*log(2*pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat', longname='A Gilbrat',
extradoc="""
Gilbrat distribution
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
"""
)
# MAXWELL
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in
the Mathworld description [1]_.
Probability density function. Given by :math:`\sqrt{2/\pi} x^2 \exp(-x^2/2)`
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0,size=self._size)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5,x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5,q))
def _stats(self):
val = 3*pi-8
return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \
(-12*pi*pi + 160*pi - 384) / val**2.0
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell', extradoc="""
Maxwell distribution
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for x > 0.
"""
)
# Mielke's Beta-Kappa
class mielke_gen(rv_continuous):
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q,s*1.0/k)
return pow(qsk/(1.0-qsk),1.0/s)
mielke = mielke_gen(a=0.0, name='mielke', longname="A Mielke's Beta-Kappa",
shapes="k, s", extradoc="""
Mielke's Beta-Kappa distribution
mielke.pdf(x,k,s) = k*x**(k-1) / (1+x**s)**(1+k/s)
for x > 0.
"""
)
# Nakagami (cf Chi)
class nakagami_gen(rv_continuous):
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu,nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu,q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami", longname="A Nakagami",
shapes='nu', extradoc="""
Nakagami distribution
nakagami.pdf(x,nu) = 2*nu**nu/gamma(nu) * x**(2*nu-1) * exp(-nu*x**2)
for x > 0, nu > 0.
"""
)
# Non-central chi-squared
# nc is lambda of definition, df is nu
class ncx2_gen(rv_continuous):
def _rvs(self, df, nc):
return mtrand.noncentral_chisquare(df,nc,self._size)
def _pdf(self, x, df, nc):
a = arr(df/2.0)
Px = exp(-nc/2.0)*special.hyp0f1(a,nc*x/4.0)
Px *= exp(-x/2.0)*x**(a-1) / arr(2**a * special.gamma(a))
return Px
def _cdf(self, x, df, nc):
return special.chndtr(x,df,nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q,df,nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \
12.0*(val+2*nc)/val**2.0
ncx2 = ncx2_gen(a=0.0, name='ncx2', longname="A non-central chi-squared",
shapes="df, nc", extradoc="""
Non-central chi-squared distribution
ncx2.pdf(x,df,nc) = exp(-(nc+df)/2)*1/2*(x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for x > 0.
"""
)
# Non-central F
class ncf_gen(rv_continuous):
def _rvs(self, dfn, dfd, nc):
return mtrand.noncentral_f(dfn,dfd,nc,self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1,n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1)
Px /= special.beta(n1/2,n2/2)
#this function does not have a return
# drop it for now, the generic function seems to work ok
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn,dfd,nc,x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn *1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf', longname="A non-central F distribution",
shapes="dfn, dfd, nc", extradoc="""
Non-central F distribution
ncf.pdf(x,df1,df2,nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2)))
* df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
* (df2+df1*x)**(-(df1+df2)/2)
* gamma(df1/2)*gamma(1+df2/2)
* L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2)))
/ (B(v1/2, v2/2) * gamma((v1+v2)/2))
for df1, df2, nc > 0.
"""
)
## Student t distribution
class t_gen(rv_continuous):
def _rvs(self, df):
return mtrand.standard_t(df, size=self._size)
#Y = f.rvs(df, df, size=self._size)
#sY = sqrt(Y)
#return 0.5*sqrt(df)*(sY-1.0/sY)
def _pdf(self, x, df):
r = arr(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t',longname="Student's T",
shapes="df", extradoc="""
Student's T distribution
gamma((df+1)/2)
t.pdf(x,df) = -----------------------------------------------
sqrt(pi*df)*gamma(df/2)*(1+x**2/df)**((df+1)/2)
for df > 0.
"""
)
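# Cross-check note (illustrative): with df = 1 the pdf above reduces to
# 1/(pi*(1+x**2)), i.e. t.pdf(x, 1) matches cauchy.pdf(x), consistent with the
# remark in the cauchy extradoc earlier in this file.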
## Non-central T distribution
class nct_gen(rv_continuous):
def _rvs(self, df, nc):
return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF)
trm1 /= arr(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2,0.5,valF)
trm2 /= arr(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
val1 = gam((df-1.0)/2.0)
val2 = gam(df/2.0)
if 'm' in moments:
mu = nc*sqrt(df/2.0)*val1/val2
if 'v' in moments:
var = (nc*nc+1.0)*df/(df-2.0)
var -= nc*nc*df* val1**2 / 2.0 / val2**2
mu2 = var
if 's' in moments:
g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \
-nc*nc*(df-2)*(df-3)*val1**2)
g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \
nc*nc*df*(val1/val2)**2) * val2 * \
(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2**2)
g1 = g1n/g1d
if 'k' in moments:
g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \
2**(6-2*df) * nc*nc*(df-2)*(df-4)* \
(nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \
4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4)
g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2)**2
g2 = g2n / g2d
return mu, mu2, g1, g2
nct = nct_gen(name="nct", longname="A Noncentral T",
shapes="df, nc", extradoc="""
Non-central Student T distribution
df**(df/2) * gamma(df+1)
nct.pdf(x,df,nc) = --------------------------------------------------
2**df*exp(nc**2/2)*(df+x**2)**(df/2) * gamma(df/2)
for df > 0, nc > 0.
"""
)
# Pareto
class pareto_gen(rv_continuous):
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask,b)
mu = valarray(shape(b),value=inf)
place(mu, mask, bt / (bt-1.0))  # np.place modifies mu in place and returns None
if 'v' in moments:
mask = b > 2
bt = extract( mask,b)
mu2 = valarray(shape(b), value=inf)
place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract( mask,b)
g1 = valarray(shape(b), value=nan)
vals = 2*(bt+1.0)*sqrt(bt-2.0)/((bt-3.0)*sqrt(bt))
place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract( mask,b)
g2 = valarray(shape(b), value=nan)
vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \
polyval([1.0,-7.0,12.0,0.0],bt)
place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto", longname="A Pareto",
shapes="b", extradoc="""
Pareto distribution
pareto.pdf(x,b) = b/x**(b+1)
for x >= 1, b > 0.
"""
)
# LOMAX (Pareto of the second kind.)
# Special case of Pareto of the first kind (location=-1.0)
class lomax_gen(rv_continuous):
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q,-1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax",
longname="A Lomax (Pareto of the second kind)",
shapes="c", extradoc="""
Lomax (Pareto of the second kind) distribution
lomax.pdf(x,c) = c / (1+x)**(c+1)
for x >= 0, c > 0.
"""
)
## Power-function distribution
## Special case of beta dist. with d =1.0
class powerlaw_gen(rv_continuous):
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + (a-1)*log(x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return a/(a+1.0), a*(a+2.0)/(a+1.0)**2, \
2*(1.0-a)*sqrt((a+2.0)/(a*(a+3.0))), \
6*polyval([1,-1,-6,2],a)/(a*(a+3.0)*(a+4))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw",
longname="A power-function",
shapes="a", extradoc="""
Power-function distribution
powerlaw.pdf(x,a) = a*x**(a-1)
for 0 <= x <= 1, a > 0.
"""
)
# Power log normal
class powerlognorm_gen(rv_continuous):
def _pdf(self, x, c, s):
return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
def _cdf(self, x, c, s):
return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
def _ppf(self, q, c, s):
return exp(-s*norm.ppf(pow(1.0-q,1.0/c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm",
longname="A power log-normal",
shapes="c, s", extradoc="""
Power log-normal distribution
powerlognorm.pdf(x,c,s) = c/(x*s) * phi(log(x)/s) * (Phi(-log(x)/s))**(c-1)
where phi is the normal pdf, and Phi is the normal cdf, and x > 0, s,c > 0.
"""
)
# Power Normal
class powernorm_gen(rv_continuous):
def _pdf(self, x, c):
return c*_norm_pdf(x)* \
(_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -norm.ppf(pow(1.0-q,1.0/c))
powernorm = powernorm_gen(name='powernorm', longname="A power normal",
shapes="c", extradoc="""
Power normal distribution
powernorm.pdf(x,c) = c * phi(x)*(Phi(-x))**(c-1)
where phi is the normal pdf, and Phi is the normal cdf, and x > 0, c > 0.
"""
)
# R-distribution (a general-purpose distribution with a variety of shapes).
# FIXME: PPF does not work.
class rdist_gen(rv_continuous):
def _pdf(self, x, c):
return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0)
def _cdf_skip(self, x, c):
#error inspecial.hyp2f1 for some values see tickets 758, 759
return 0.5 + x/special.beta(0.5,c/2.0)* \
special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x)
def _munp(self, n, c):
return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0)
rdist = rdist_gen(a=-1.0,b=1.0, name="rdist", longname="An R-distributed",
shapes="c", extradoc="""
R-distribution
rdist.pdf(x,c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for -1 <= x <= 1, c > 0.
"""
)
# Rayleigh distribution (this is chi with df=2 and loc=0.0)
# scale is the mode.
class rayleigh_gen(rv_continuous):
def _rvs(self):
return chi.rvs(2,size=self._size)
def _pdf(self, r):
return r*exp(-r*r/2.0)
def _cdf(self, r):
return 1.0-exp(-r*r/2.0)
def _ppf(self, q):
return sqrt(-2*log(1-q))
def _stats(self):
val = 4-pi
return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \
6*pi/val-16/val**2
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh",
longname="A Rayleigh",
extradoc="""
Rayleigh distribution
rayleigh.pdf(r) = r * exp(-r**2/2)
for x >= 0.
"""
)
# Reciprocal Distribution
class reciprocal_gen(rv_continuous):
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
# argcheck should be called before _pdf
return 1.0/(x*self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a,q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n))
def _entropy(self,a,b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal",
longname="A reciprocal",
shapes="a, b", extradoc="""
Reciprocal distribution
reciprocal.pdf(x,a,b) = 1/(x*log(b/a))
for a <= x <= b, a,b > 0.
"""
)
# Rice distribution
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
def _pdf(self, x, b):
return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b)
def _logpdf(self, x, b):
return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b))
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1+nd2
b2 = b*b/2.0
return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \
special.hyp1f1(n1,1,b2)
rice = rice_gen(a=0.0, name="rice", longname="A Rice",
shapes="b", extradoc="""
Rician distribution
rice.pdf(x,b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for x > 0, b > 0.
"""
)
# Reciprocal Inverse Gaussian
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
def _rvs(self, mu): #added, taken from invgauss
return 1.0/mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
# xb=50 or something large is necessary for stats to converge without exception
recipinvgauss = recipinvgauss_gen(a=0.0, xb=50, name='recipinvgauss',
longname="A reciprocal inverse Gaussian",
shapes="mu", extradoc="""
Reciprocal inverse Gaussian
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for x >= 0.
"""
)
# Semicircular
class semicircular_gen(rv_continuous):
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0,b=1.0, name="semicircular",
longname="A semicircular",
extradoc="""
Semicircular distribution
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for -1 <= x <= 1.
"""
)
# Triangular
# up-sloping line from loc to (loc + c*scale) and then downsloping line from
# loc + c*scale to loc + scale
# _trstr = "Left must be <= mode which must be <= right with left < right"
class triang_gen(rv_continuous):
def _rvs(self, c):
return mtrand.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5*(1.0-c+c*c)**1.5), -3.0/5.0
def _entropy(self,c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang", longname="A Triangular",
shapes="c", extradoc="""
Triangular distribution
up-sloping line from loc to (loc + c*scale) and then down-sloping
from (loc + c*scale) to (loc + scale).
- standard form is in the range [0,1] with c the mode.
- location parameter shifts the start to loc
- scale changes the width from 1 to scale
"""
)
# Truncated Exponential
class truncexpon_gen(rv_continuous):
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
#wrong answer with formula, same as in continuous.pdf
#return gam(n+1)-special.gammainc(1+n,b)
if n == 1:
return (1-(b+1)*exp(-b))/(-expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
else:
#return generic for higher moments
#return rv_continuous._mom1_sc(self,n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon',
longname="A truncated exponential",
shapes="b", extradoc="""
Truncated exponential distribution
truncexpon.pdf(x,b) = exp(-x)/(1-exp(-b))
for 0 < x < b.
"""
)
# Truncated Normal
class truncnorm_gen(rv_continuous):
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
# All of these assume that _argcheck is called first
# and no other thread calls _pdf before.
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
return norm._ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d #correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm', longname="A truncated normal",
shapes="a, b", extradoc="""
Truncated Normal distribution.
The standard form of this distribution is a standard normal truncated to the
range [a,b] --- notice that a and b are defined over the domain
of the standard normal. To convert clip values for a specific mean and
standard deviation use a,b = (myclip_a-my_mean)/my_std, (myclip_b-my_mean)/my_std
"""
)
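# Usage sketch (illustrative): for a normal with mean m and standard deviation
# sd truncated to [lo, hi], pass standardized endpoints (m, sd, lo, hi are
# hypothetical user values):
#   a, b = (lo - m)/sd, (hi - m)/sd
#   truncnorm.rvs(a, b, loc=m, scale=sd, size=100)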
# Tukey-Lambda
# A flexible distribution ranging from Cauchy (lam=-1)
# to logistic (lam=0.0)
# to approx Normal (lam=0.14)
# to u-shape (lam = 0.5)
# to Uniform from -1 to 1 (lam = 1)
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
def _argcheck(self, lam):
# lam in RR.
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = arr(special.tklmbda(x,lam))
Px = Fx**(lam-1.0) + (arr(1-Fx))**(lam-1.0)
Px = 1.0/arr(Px)
return where((lam <= 0) | (abs(x) < 1.0/arr(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0)&(q==q), vals2, vals1)
def _stats(self, lam):
mu2 = 2*gam(lam+1.5)-lam*pow(4,-lam)*sqrt(pi)*gam(lam)*(1-2*lam)
mu2 /= lam*lam*(1+2*lam)*gam(1+1.5)
mu4 = 3*gam(lam)*gam(lam+0.5)*pow(2,-2*lam) / lam**3 / gam(2*lam+1.5)
mu4 += 2.0/lam**4 / (1+4*lam)
mu4 -= 2*sqrt(3)*gam(lam)*pow(2,-6*lam)*pow(3,3*lam) * \
gam(lam+1.0/3)*gam(lam+2.0/3) / (lam**3.0 * gam(2*lam+1.5) * \
gam(lam+0.5))
g2 = mu4 / mu2 / mu2 - 3.0
return 0, mu2, 0, g2
def _entropy(self, lam):
def integ(p):
return log(pow(p,lam-1)+pow(1-p,lam-1))
return integrate.quad(integ,0,1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda', longname="A Tukey-Lambda",
shapes="lam", extradoc="""
Tukey-Lambda distribution
A flexible distribution ranging from Cauchy (lam=-1)
to logistic (lam=0.0)
to approx Normal (lam=0.14)
to u-shape (lam = 0.5)
to Uniform from -1 to 1 (lam = 1)
"""
)
# Uniform
# loc to loc + scale
class uniform_gen(rv_continuous):
def _rvs(self):
return mtrand.uniform(0.0,1.0,self._size)
def _pdf(self, x):
return 1.0*(x==x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0,b=1.0, name='uniform', longname="A uniform",
extradoc="""
Uniform distribution
constant between loc and loc+scale
"""
)
# Von-Mises
# if x or loc is outside the range, the values are treated as angles and
# wrapped to their [-pi, pi] equivalents
eps = numpy.finfo(float).eps
class vonmises_gen(rv_continuous):
def _rvs(self, b):
return mtrand.vonmises(0.0, b, size=self._size)
def _pdf(self, x, b):
return exp(b*cos(x)) / (2*pi*special.i0(b))
def _cdf(self, x, b):
return vonmises_cython.von_mises_cdf(b,x)
def _stats_skip(self, b):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises', longname="A Von Mises",
shapes="b", extradoc="""
Von Mises distribution
if x is not in range or loc is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
vonmises.pdf(x,b) = exp(b*cos(x)) / (2*pi*I[0](b))
for -pi <= x <= pi, b > 0.
"""
)
## Wald distribution (Inverse Normal with shape parameter mu=1.0)
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function, `pdf`, is defined by
``1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))``, for ``x > 0``.
%(example)s
"""
def _rvs(self):
return mtrand.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald", extradoc="""
Wald distribution
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for x > 0.
"""
)
## Weibull
## See Frechet
# Wrapped Cauchy
class wrapcauchy_gen(rv_continuous):
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x<pi
c2 = 1-c1
xp = extract( c1,x)
#valp = extract(c1,val)
xn = extract( c2,x)
#valn = extract(c2,val)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
output = place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
output = place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0,b=2*pi, name='wrapcauchy',
longname="A wrapped Cauchy",
shapes="c", extradoc="""
Wrapped Cauchy distribution
wrapcauchy.pdf(x,c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for 0 <= x <= 2*pi, 0 < c < 1.
"""
)
### DISCRETE DISTRIBUTIONS
###
def entropy(pk,qk=None):
"""S = entropy(pk,qk=None)
calculate the entropy of a distribution given the p_k values
S = -sum(pk * log(pk), axis=0)
If qk is not None, then compute a relative entropy
S = sum(pk * log(pk / qk), axis=0)
Routine will normalize pk and qk if they don't sum to 1
"""
pk = arr(pk)
pk = 1.0* pk / sum(pk,axis=0)
if qk is None:
vec = where(pk == 0, 0.0, pk*log(pk))
else:
qk = arr(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk,axis=0)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
if any(take(pk,nonzero(qk==0.0),axis=0)!=0.0, 0):
return inf
vec = where (pk == 0, 0.0, -pk*log(pk / qk))
return -sum(vec,axis=0)
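# A small worked example for the helper above (natural-log convention, as
# in the code): a fair coin has entropy log(2), and the relative entropy
# of a distribution against itself is zero.
# >>> entropy([0.5, 0.5])               # ~ 0.6931 == log(2)
# >>> entropy([0.5, 0.5], [0.5, 0.5])   # 0.0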
## Handlers for generic case where xk and pk are given
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk>xk),axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals>=q),axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = arr(n)
return sum(self.xk**n[newaxis,...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = arr(t)
return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
'''non-central moment of discrete distribution'''
#many changes, originally not even a return
tot = 0.0
diff = 1e100
#pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
#handle cases with infinite support
ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
while (pos <= self.b) and ((pos <= ulimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# use pmf because _pmf does not check support in randint
# and there might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
#using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.invcdf_b
a = self.invcdf_a
if isinf(b): # Be sure ending point is > q
b = max(100*q,10)
while 1:
if b >= self.b: qb = 1.0; break
qb = self._cdf(b,*args)
if (qb < q): b += 10
else: break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = min(-100*q,-10)
while 1:
if a <= self.a: qb = 0.0; break
qa = self._cdf(a,*args)
if (qa > q): a -= 10
else: break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b == a+1:
#testcase: return wrong number at lower index
#python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
#python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
#python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
a = c
qa = qc
elif (qc > q):
b = c
qb = qc
else:
return c
def reverse_dict(dict):
newdict = {}
sorted_keys = copy(dict.keys())
sorted_keys.sort()
for key in sorted_keys[::-1]:
newdict[dict[key]] = key
return newdict
def make_dict(keys, values):
d = {}
for key, value in zip(keys, values):
d[key] = value
return d
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances from for discrete random variables. rv_discrete can be used
to construct an arbitrary distribution with defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
logpmf(x, <shape(s)>, loc=0)
        log of the probability mass function
generic.cdf(x, <shape(s)>, loc=0)
cumulative density function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative density function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.fit(data, <shape(s)>, loc=0)
Parameter estimates for generic data
generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
Alternatively, the object may be called (as a function) to fix
the shape and location parameters returning a
"frozen" discrete RV object:
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given shape
and location fixed.
    You can construct an arbitrary discrete rv where P{X=xk} = pk
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
        class poisson_gen(rv_discrete):
#"Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance
poisson = poisson_gen(name="poisson", shapes="mu", longname='A Poisson')
The docstring can be created from a template.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> numargs = generic.numargs
    >>> [ <shape(s)> ] = ['Replace with reasonable value', ]*numargs
Display frozen pmf:
>>> rv = generic(<shape(s)>)
>>> x = np.arange(0, np.min(rv.dist.b, 3)+1)
>>> h = plt.plot(x, rv.pmf(x))
Check accuracy of cdf and ppf:
>>> prb = generic.cdf(x, <shape(s)>)
>>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20)
Random number generation:
>>> R = generic.rvs(<shape(s)>, size=100)
Custom made discrete distribution:
>>> vals = [arange(7), (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)]
>>> custm = rv_discrete(name='custm', values=vals)
>>> h = plt.plot(vals[0], custm.pmf(vals[0]))
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8,values=None,inc=1,longname=None,
shapes=None, extradoc=None):
super(rv_generic,self).__init__()
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.invcdf_a = a # what's the difference to self.a, .b
self.invcdf_b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = sgf(self._cdfsingle,otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk),indx, 0)
self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
self.qvals = numpy.cumsum(self.pk,axis=0)
self.F = make_dict(self.xk, self.qvals)
self.Finv = reverse_dict(self.F)
self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self.numargs=0
else:
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pmf_signature = inspect.getargspec(self._pmf.im_func)
numargs2 = len(pmf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction needs to be after we know numargs
#correct nin for generic moment vectorization
self.vec_generic_moment = sgf(_drv2_moment, otypes='d')
self.vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(self.vec_generic_moment,
self, rv_discrete)
#correct nin for ppf vectorization
_vppf = sgf(_drv2_ppfsingle,otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._vecppf = instancemethod(_vppf,
self, rv_discrete)
#now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict_discrete.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array-like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _rvs(self, *args):
return self._ppf(mtrand.random_sample(self._size),*args)
def _nonzero(self, k, *args):
return floor(k)==k
def _argcheck(self, *args):
cond = 1
for arg in args:
cond &= (arg > 0)
return cond
def _pmf(self, k, *args):
return self._cdf(k,*args) - self._cdf(k-1,*args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdfsingle(self, k, *args):
m = arange(int(self.a),k+1)
return sum(self._pmf(m,*args),axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._vecppf(q, *args)
def _isf(self, q, *args):
return self._ppf(1-q,*args)
def _stats(self, *args):
return None, None, None, None
def _munp(self, n, *args):
return self.generic_moment(n, *args)
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array-like
random variates of given `size`
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k,*args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
pmf : array-like
Probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._pmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
logpmf : array-like
Log of the probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function at k of the given RV
Parameters
----------
k : array-like, int
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
cdf : array-like
Cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2*(cond0==cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array-like, int
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
logcdf : array-like
Log of the cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2*(cond0==cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,k,*args,**kwds):
"""
Survival function (1-cdf) at k of the given RV
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
sf : array-like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,k,*args,**kwds):
"""
Log of the survival function (1-cdf) at k of the given RV
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
sf : array-like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array-like
lower tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale: array-like, optional
scale parameter (default=1)
Returns
-------
k : array-like
quantile corresponding to the lower tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nin and inf
output = place(output,(q==0)*(cond==cond), self.a-1)
output = place(output,cond2,self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function (1-sf) at q of the given RV
Parameters
----------
q : array-like
upper tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
k : array-like
quantile corresponding to the upper tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
#old:
## output = valarray(shape(cond),value=self.b,typecode='d')
## #typecode 'd' to handle nin and inf
## output = place(output,(1-cond0)*(cond1==cond1), self.badvalue)
## output = place(output,cond2,self.a-1)
#same problem as with ppf
# copied from ppf and changed
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nin and inf
output = place(output,(q==0)*(cond==cond), self.b)
output = place(output,cond2,self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766
if output.ndim == 0:
return output[()]
return output
def stats(self, *args, **kwds):
"""
Some statistics of the given discrete RV
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,moments=map(kwds.get,['loc','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None: # loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and moments is None: # loc, scale, and moments
loc, moments = args[-2:]
args = args[:self.numargs]
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc = arr(loc)
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*(mu2**1.5)
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
goodargs = argsreduce(cond, *(args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
out0 = place(out0,cond,mu+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
out0 = default.copy()
out0 = place(out0,cond,mu2)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
out0 = place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
out0 = place(out0,cond,g2)
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds): # Non-central moments in standard form.
"""
n'th non-central moment of the distribution
Parameters
----------
n: int, n>=1
order of moment
arg1, arg2, arg3,...: float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
dict = {}
mu, mu2, g1, g2 = self._stats(*args,**dict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def freeze(self, *args, **kwds):
return rv_frozen(self, *args, **kwds)
def _entropy(self, *args):
if hasattr(self,'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments':'m'}))
val = self.pmf(mu,*args)
if (val==0.0): ent = 0.0
else: ent = -val*log(val)
k = 1
term = 1.0
while (abs(term) > eps):
val = self.pmf(mu+k,*args)
if val == 0.0: term = 0.0
else: term = -val * log(val)
val = self.pmf(mu-k,*args)
if val != 0.0: term -= val*log(val)
k += 1
ent += term
return ent
def entropy(self, *args, **kwds):
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
loc = arr(loc)
args = map(arr,args)
cond0 = self._argcheck(*args) & (loc==loc)
output = zeros(shape(cond0),'d')
output = place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
output = place(output,cond0,self.vecentropy(*goodargs))
return output
def __call__(self, *args, **kwds):
return self.freeze(*args,**kwds)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False):
"""calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
fn : function (default: identity mapping)
Function for which sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
            of the distribution, lb and ub are inclusive (lb<=k<=ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
            interval (k such that lb<=k<=ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as stopping criterium
for heavy tailed distribution e.g. zipf(4), accuracy for
mean, variance in example is only 1e-5,
increasing precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
"""
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc #convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc #convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
#work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1,*args)
else:
invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
#print 'low, upp', low, upp
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# fixme: replace with proper warning
print 'sum did not converge'
return tot/invfac
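# A minimal usage sketch for expect() (uses the poisson instance defined
# further down in this module): the second factorial moment of a
# Poisson(mu) variable is mu**2, so the sum should come out close to 4.0.
# >>> poisson.expect(lambda k: k*(k-1), args=(2.0,))   # ~ 4.0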
# Binomial
class binom_gen(rv_discrete):
def _rvs(self, n, pr):
return mtrand.binomial(n,pr,self._size)
def _argcheck(self, n, pr):
self.b = n
return (n>=0) & (pr >= 0) & (pr <= 1)
def _logpmf(self, x, n, pr):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) +
gamln(n-k+1)))
return combiln + k*np.log(pr) + (n-k)*np.log(1-pr)
def _pmf(self, x, n, pr):
return exp(self._logpmf(x, n, pr))
def _cdf(self, x, n, pr):
k = floor(x)
vals = special.bdtr(k,n,pr)
return vals
def _sf(self, x, n, pr):
k = floor(x)
return special.bdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.bdtrik(q,n,pr))
vals1 = vals-1
temp = special.bdtr(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
q = 1.0-pr
mu = n * pr
var = n * pr * q
g1 = (q-pr) / sqrt(n*pr*q)
g2 = (1.0-6*pr*q)/(n*pr*q)
return mu, var, g1, g2
def _entropy(self, n, pr):
k = r_[0:n+1]
vals = self._pmf(k,n,pr)
lvals = where(vals==0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
binom = binom_gen(name='binom',shapes="n, pr",extradoc="""
Binomial distribution
Counts the number of successes in *n* independent
trials when the probability of success each time is *pr*.
binom.pmf(k,n,p) = choose(n,k)*p**k*(1-p)**(n-k)
for k in {0,1,...,n}
""")
# Bernoulli distribution
class bernoulli_gen(binom_gen):
def _rvs(self, pr):
return binom_gen._rvs(self, 1, pr)
def _argcheck(self, pr):
return (pr >=0 ) & (pr <= 1)
def _logpmf(self, x, pr):
return binom._logpmf(x, 1, pr)
def _pmf(self, x, pr):
return binom._pmf(x, 1, pr)
def _cdf(self, x, pr):
return binom._cdf(x, 1, pr)
def _sf(self, x, pr):
return binom._sf(x, 1, pr)
def _ppf(self, q, pr):
return binom._ppf(q, 1, pr)
def _stats(self, pr):
return binom._stats(1, pr)
def _entropy(self, pr):
return -pr*log(pr)-(1-pr)*log(1-pr)
bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="pr",extradoc="""
Bernoulli distribution
1 if binary experiment succeeds, 0 otherwise. Experiment
succeeds with probability *pr*.
bernoulli.pmf(k,p) = 1-p if k = 0
= p if k = 1
for k = 0,1
"""
)
# Negative binomial
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Probability mass function, given by
``np.choose(k+n-1, n-1) * p**n * (1-p)**k`` for ``k >= 0``.
%(example)s
"""
def _rvs(self, n, pr):
return mtrand.negative_binomial(n, pr, self._size)
def _argcheck(self, n, pr):
return (n >= 0) & (pr >= 0) & (pr <= 1)
def _pmf(self, x, n, pr):
coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n))
return coeff * power(pr,n) * power(1-pr,x)
def _logpmf(self, x, n, pr):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(pr) + x*log(1-pr)
def _cdf(self, x, n, pr):
k = floor(x)
return special.betainc(n, k+1, pr)
def _sf_skip(self, x, n, pr):
#skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.nbdtrik(q,n,pr))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
Q = 1.0 / pr
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom', shapes="n, pr", extradoc="""
Negative binomial distribution
nbinom.pmf(k,n,p) = choose(k+n-1,n-1) * p**n * (1-p)**k
for k >= 0.
"""
)
## Geometric distribution
class geom_gen(rv_discrete):
def _rvs(self, pr):
return mtrand.geometric(pr,size=self._size)
def _argcheck(self, pr):
return (pr<=1) & (pr >= 0)
def _pmf(self, k, pr):
return (1-pr)**(k-1) * pr
def _logpmf(self, k, pr):
        return (k-1)*log(1-pr) + log(pr)
def _cdf(self, x, pr):
k = floor(x)
return (1.0-(1.0-pr)**k)
def _sf(self, x, pr):
k = floor(x)
return (1.0-pr)**k
def _ppf(self, q, pr):
vals = ceil(log(1.0-q)/log(1-pr))
temp = 1.0-(1.0-pr)**(vals-1)
return where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, pr):
mu = 1.0/pr
qr = 1.0-pr
var = qr / pr / pr
g1 = (2.0-pr) / sqrt(qr)
g2 = numpy.polyval([1,-6,6],pr)/(1.0-pr)
return mu, var, g1, g2
geom = geom_gen(a=1,name='geom', longname="A geometric",
shapes="pr", extradoc="""
Geometric distribution
geom.pmf(k,p) = (1-p)**(k-1)*p
for k >= 1
"""
)
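# A minimal usage sketch (support starts at k=1, see a=1 above):
# >>> geom.pmf(1, 0.25)   # success on the first trial: 0.25
# >>> geom.sf(2, 0.25)    # first two trials both fail: 0.75**2 = 0.5625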
## Hypergeometric distribution
class hypergeom_gen(rv_discrete):
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n,M-n,N,size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self,M,n,N)
cond &= (n <= M) & (N <= M)
self.a = N-(M-n)
self.b = min(n,N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
#same as the following but numerically more precise
#return comb(good,k) * comb(bad,N-k) / comb(tot,N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
tot, good = M, n
n = good*1.0
m = (tot-good)*1.0
N = N*1.0
tot = m+n
p = n/tot
mu = N*p
var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1))
g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N)))
m2, m3, m4, m5 = m**2, m**3, m**4, m**5
        n2, n3, n4, n5 = n**2, n**3, n**4, n**5
g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \
- 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \
- 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \
+ 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \
12*m*n2 - 6*n3)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = r_[N-(M-n):min(n,N)+1]
vals = self.pmf(k,M,n,N)
lvals = where(vals==0.0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
hypergeom = hypergeom_gen(name='hypergeom',longname="A hypergeometric",
shapes="M, n, N", extradoc="""
Hypergeometric distribution
Models drawing objects from a bin.
M is total number of objects, n is total number of Type I objects.
RV counts number of Type I objects in N drawn without replacement from
population.
hypergeom.pmf(k, M, n, N) = choose(n,k)*choose(M-n,N-k)/choose(M,N)
for N - (M-n) <= k <= min(n,N)
"""
)
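# A minimal usage sketch: drawing N=2 from M=10 objects of which n=3 are
# Type I, the chance of exactly one Type I draw is
# choose(3,1)*choose(7,1)/choose(10,2) = 21/45.
# >>> hypergeom.pmf(1, 10, 3, 2)   # ~ 0.4667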
## Logarithmic (Log-Series), (Series) distribution
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
def _rvs(self, pr):
# looks wrong for pr>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(pr,size=self._size)
def _argcheck(self, pr):
return (pr > 0) & (pr < 1)
def _pmf(self, k, pr):
return -pr**k * 1.0 / k / log(1-pr)
def _stats(self, pr):
r = log(1-pr)
mu = pr / (pr - 1.0) / r
mu2p = -pr / r / (pr-1.0)**2
var = mu2p - mu*mu
mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / var**1.5
mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \
6*pr*pr / (pr-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1,name='logser', longname='A logarithmic',
shapes='pr', extradoc="""
Logarithmic (Log-Series, Series) distribution
logser.pmf(k,p) = - p**k / (k*log(1-p))
for k >= 1
"""
)
## Poisson distribution
class poisson_gen(rv_discrete):
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _pmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return exp(Pk)
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k,mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k,mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q,mu))
vals1 = vals-1
temp = special.pdtr(vals1,mu)
return where((temp >= q), vals1, vals)
def _stats(self, mu):
var = mu
g1 = 1.0/arr(sqrt(mu))
g2 = 1.0 / arr(mu)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson',
shapes="mu", extradoc="""
Poisson distribution
poisson.pmf(k, mu) = exp(-mu) * mu**k / k!
for k >= 0
"""
)
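# A minimal usage sketch (values follow from the pmf above):
# >>> poisson.pmf(0, 2.0)   # exp(-2) ~ 0.1353
# >>> poisson.cdf(1, 2.0)   # 3*exp(-2) ~ 0.4060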
## (Planck) Discrete Exponential
class planck_gen(rv_discrete):
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = inf
return 1
elif (lambda_ < 0):
self.a = -inf
self.b = 0
return 1
return 0 # lambda_ = 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck',longname='A discrete exponential ',
shapes="lamda",
extradoc="""
Planck (Discrete Exponential)
planck.pmf(k,b) = (1-exp(-b))*exp(-b*k)
for k*b >= 0
"""
)
class boltzmann_gen(rv_discrete):
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ',
shapes="lamda, N",
extradoc="""
Boltzmann (Truncated Discrete Exponential)
boltzmann.pmf(k,b,N) = (1-exp(-b))*exp(-b*k)/(1-exp(-b*N))
for k=0,..,N-1
"""
)
## Discrete Uniform
class randint_gen(rv_discrete):
def _argcheck(self, min, max):
self.a = min
self.b = max-1
return (max > min)
def _pmf(self, k, min, max):
fact = 1.0 / (max - min)
return fact
def _cdf(self, x, min, max):
k = floor(x)
return (k-min+1)*1.0/(max-min)
def _ppf(self, q, min, max):
vals = ceil(q*(max-min)+min)-1
vals1 = (vals-1).clip(min, max)
temp = self._cdf(vals1, min, max)
return where(temp >= q, vals1, vals)
def _stats(self, min, max):
m2, m1 = arr(max), arr(min)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d-1)*(d+1.0)/12.0
g1 = 0.0
        g2 = -6.0/5.0*(d*d+1.0)/(d*d-1.0)
return mu, var, g1, g2
def _rvs(self, min, max=None):
"""An array of *size* random integers >= min and < max.
If max is None, then range is >=0 and < min
"""
return mtrand.randint(min, max, self._size)
def _entropy(self, min, max):
return log(max-min)
randint = randint_gen(name='randint',longname='A discrete uniform '\
'(random integer)', shapes="min, max",
extradoc="""
Discrete Uniform
Random integers >=min and <max.
randint.pmf(k,min, max) = 1/(max-min)
for min <= k < max.
"""
)
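# A minimal usage sketch (support is min <= k < max):
# >>> randint.pmf(3, 0, 10)   # 1/(10-0) = 0.1
# >>> randint.cdf(4, 0, 10)   # five integers 0..4, so 0.5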
# Zipf distribution
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / arr(special.zeta(a,1) * k**a)
return Pk
def _munp(self, n, a):
return special.zeta(a-n,1) / special.zeta(a,1)
def _stats(self, a):
sv = errp(0)
fac = arr(special.zeta(a,1))
mu = special.zeta(a-1.0,1)/fac
mu2p = special.zeta(a-2.0,1)/fac
var = mu2p - mu*mu
mu3p = special.zeta(a-3.0,1)/fac
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / arr(var**1.5)
mu4p = special.zeta(a-4.0,1)/fac
sv = errp(sv)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / arr(var**2) - 3.0
return mu, var, g1, g2
zipf = zipf_gen(a=1,name='zipf', longname='A Zipf',
shapes="a", extradoc="""
Zipf distribution
zipf.pmf(k,a) = 1/(zeta(a)*k**a)
for k >= 1
"""
)
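# A minimal usage sketch (zeta(2) = pi**2/6):
# >>> zipf.pmf(1, 2.0)   # 1/zeta(2) ~ 0.6079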
# Discrete Laplacian
class dlaplace_gen(rv_discrete):
def _pmf(self, k, a):
return tanh(a/2.0)*exp(-a*abs(k))
def _cdf(self, x, a):
k = floor(x)
ind = (k >= 0)
const = exp(a)+1
return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const)
def _ppf(self, q, a):
const = 1.0/(1+exp(-a))
cons2 = 1+exp(a)
ind = q < const
vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a))
vals1 = (vals-1)
temp = self._cdf(vals1, a)
return where(temp >= q, vals1, vals)
def _stats_skip(self, a):
        # variance mu2 does not agree with sample variance,
# nor with direct calculation using pmf
# remove for now because generic calculation works
# except it does not show nice zeros for mean and skew(?)
ea = exp(-a)
e2a = exp(-2*a)
e3a = exp(-3*a)
e4a = exp(-4*a)
mu2 = 2* (e2a + ea) / (1-ea)**3.0
mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0
return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-inf,
name='dlaplace', longname='A discrete Laplacian',
shapes="a", extradoc="""
Discrete Laplacian distribution.
dlaplace.pmf(k,a) = tanh(a/2) * exp(-a*abs(k))
for a > 0.
"""
)
class skellam_gen(rv_discrete):
def _rvs(self, mu1, mu2):
n = self._size
return np.random.poisson(mu1, n)-np.random.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
#ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = np.floor(x)
px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
return px
# enable later
## def _cf(self, w, mu1, mu2):
## # characteristic function
## poisscf = poisson._cf
## return poisscf(w, mu1) * poisscf(-w, mu2)
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / np.sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam',
shapes="mu1,mu2", extradoc="""
Skellam distribution
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, k1-k2 follows a Skellam distribution with
parameters mu1 = lam1 - rho*sqrt(lam1*lam2) and
mu2 = lam2 - rho*sqrt(lam1*lam2), where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then rho = 0.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
"""
)
| gpl-3.0 |
tomlof/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 78 | 6016 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
from sklearn.ensemble.gradient_boosting import QuantileLossFunction
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula from ESLII
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
        sw_out = sw_init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_quantile_loss_function():
# Non regression test for the QuantileLossFunction object
# There was a sign problem when evaluating the function
# for negative values of 'ytrue - ypred'
x = np.asarray([-1.0, 0.0, 1.0])
y_found = QuantileLossFunction(1, 0.9)(x, np.zeros_like(x))
y_expected = np.asarray([0.1, 0.0, 0.9]).mean()
np.testing.assert_allclose(y_found, y_expected)
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
NextThought/pypy-numpy | numpy/lib/twodim_base.py | 37 | 26758 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
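# For example, _min_int(0, 100) resolves to int8, while _min_int(-1, 2**31)
# falls through to int64 because 2**31 exceeds the int32 range.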
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
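# Note on the flat-index arithmetic above: for k >= 0 the i-th diagonal entry
# goes to (row i, column i + k), whose flat index in an (n, n) array is
# i*n + i + k; for k < 0 it goes to (row i - k, column i), i.e. flat index
# (i - k)*n + i. That is exactly what ``fi`` holds before the single
# vectorized assignment ``res.flat[fi] = v``.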
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
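# The mask above is built without an explicit Python loop; it is roughly
# equivalent to the following sketch (assuming NumPy as ``np``):
#
# rows = np.arange(N)[:, None]          # shape (N, 1)
# cols = np.arange(-k, M - k)[None, :]  # shape (1, M); entry j equals j - k
# mask = rows >= cols                   # True exactly where j <= i + k
#
# ``_min_int`` merely picks the smallest integer dtype that can hold the index
# range, keeping the temporary index arrays compact.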
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
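# ``triu`` reuses the lower-triangle mask at offset ``k - 1`` with the
# ``where`` branches swapped: entries strictly below the k-th diagonal match
# the mask and are zeroed, while everything at or above it is kept unchanged.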
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
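# Sketch of the accumulation trick above (assuming NumPy as ``np``): column 0
# is set to ones and every remaining column to ``x``; the cumulative product
# across columns then turns those columns into x, x**2, x**3, ... in place:
#
# >>> np.multiply.accumulate(np.array([[2., 2., 2.]]), axis=1)
# array([[ 2.,  4.,  8.]])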
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
    >>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
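# Note: apart from promoting a bare array of bin edges into the
# ``[xedges, yedges]`` form, ``histogram2d`` defers all counting to
# ``histogramdd`` on the two stacked samples, so (illustrative)
#
# >>> H, xe, ye = np.histogram2d(x, y, bins=[xedges, yedges])
#
# gives the same counts as ``np.histogramdd([x, y], bins=[xedges, yedges])``.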
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
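# ``mask_indices`` works with any tril/triu-like callable, at the cost of
# materializing a dense (n, n) array of ones and evaluating the mask on it;
# the specialized ``tril_indices``/``triu_indices`` below build the boolean
# mask directly from ``tri``.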
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
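# The upper-triangle indices are the complement of the strict lower triangle,
# hence the negated ``tri`` mask at offset ``k - 1``, whereas ``tril_indices``
# above uses the un-negated mask at offset ``k``.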
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
rseubert/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 19 | 22876 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
    """Simple sanity check on a two-class dataset
Make sure it predicts the correct result on simple datasets.
"""
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
"""Test logistic regression with the iris dataset"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
"""Test multinomial LR on a binary problem."""
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
"""Test sparsify and densify members."""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
"""Test that we can write to coef_ and intercept_"""
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
"""Test that the path algorithm is consistent"""
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
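# Note on the comparison above: liblinear also penalizes the intercept term,
# so a very large ``intercept_scaling`` (1e4) is used to make that penalty
# negligible and allow a meaningful comparison with the path solvers, which
# leave the intercept unpenalized.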
def test_liblinear_random_state():
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_loss_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
loss_interp_2, grad_interp_2, hess = \
_logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
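# The least-squares check above relies on the first-order expansion
# grad(w + t*v) ~ grad(w) + t*H(w)v for small t: regressing the centered
# gradients on t recovers the Hessian-vector product H(w)v more robustly than
# a simple two-point finite difference would.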
def test_logistic_cv():
"""test for LogisticRegressionCV object"""
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
w, X, y, alpha)
    # Do not fit the intercept here; instead, make the problem equivalent by
    # explicitly appending a column of ones to X.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
"""Test that OvR and multinomial are correct using the iris dataset."""
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined fold assignment, since folds generated from a modified y would differ
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided
    # for a multiclass problem. It can, however, handle binary
    # problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=auto
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='auto')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='auto')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
"""Test that warnings are raised if model does not converge"""
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
"""Tests for the multinomial option in logistic regression"""
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=50, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in
    # this case we take the average of the coefs after fitting across all
    # the folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
_, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_loss_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
"""Test negative prediction when decision_function values are zero.
Liblinear predicts the positive class when decision_function values
are zero. This is a test to verify that we do not do the same.
See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
"""
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
"""Test LogRegCV with solver='liblinear' works for sparse matrices"""
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/linear_model/setup.py | 169 | 1567 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
alyptik/dotfiles | .ipython/profile_default/ipython_config.py | 1 | 22545 | # Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
#c.InteractiveShellApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
#c.InteractiveShellApp.extensions = []
## dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
## A file to be run
#c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').
#c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
## Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = ''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
#c.BaseIPythonApplication.profile = 'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
## Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
c.InteractiveShell.autocall = 2
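# For example, with autocall = 2 typing ``len "abc"`` at the prompt is
# rewritten to ``len("abc")`` and even a bare callable such as ``str`` is
# invoked as ``str()``; the 'smart' value 1 only rewrites when arguments
# follow the callable.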
## Autoindent IPython code entered interactively.
# c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
# c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
#c.InteractiveShell.banner1 = 'Python 3.6.1 (default, Mar 27 2017, 00:27:06) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.3.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
## The part of the banner to be printed after the profile
#c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
#c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
c.InteractiveShell.colors = 'Linux'
##
#c.InteractiveShell.debug = False
## **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
#c.InteractiveShell.deep_reload = False
## Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
## Total length of command history
c.InteractiveShell.history_length = 1000000
## The number of saved history entries to be loaded into the history buffer at
# startup.
c.InteractiveShell.history_load_length = 1000000
##
#c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
## The name of the logfile to use.
#c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
##
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
c.TerminalInteractiveShell.confirm_exit = False
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
# c.TerminalInteractiveShell.editing_mode = 'vi'
c.TerminalInteractiveShell.editing_mode = 'emacs'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
c.TerminalInteractiveShell.editor = 'vim'
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
c.TerminalInteractiveShell.extra_open_editor_shortcuts = True
## Highlight matching brackets.
#c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax
# highlighting:
# default, emacs, friendly, colorful, autumn, murphy, manni, monokai, perldoc, pastie, borland, trac, native, fruity, bw, vim, vs, tango, rrt, xcode, igor, paraiso-light, paraiso-dark, lovelace, algol, algol_nu, arduino, rainbow_dash, abap
# c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
c.TerminalInteractiveShell.highlighting_style = 'monokai'
## Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Enable mouse support in the prompt
c.TerminalInteractiveShell.mouse_support = True
## Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion, multiline input, and prompt
# colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
## Number of line at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
# c.TerminalInteractiveShell.true_color = True
c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
#c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colon at both
# end but not the back ticks), to avoid creating an history file.
#c.HistoryAccessor.hist_file = ''
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
## Activate greedy completion. PENDING DEPRECATION: this is now mostly taken
# care of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
c.IPCompleter.omit__names = 0
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
# vi:ft=cfg:
| gpl-3.0 |
kazemakase/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
mailhexu/pyDFTutils | pyDFTutils/phonon/parser.py | 2 | 16721 | #!/usr/bin/env python
import os
import numpy as np
from ase.data import chemical_symbols
import matplotlib.pyplot as plt
from abipy.abilab import abiopen
from pyDFTutils.perovskite.perovskite_mode import label_zone_boundary, label_Gamma
from ase.units import Ha
from spglib import spglib
def displacement_cart_to_evec(displ_cart,
masses,
scaled_positions,
qpoint=None,
add_phase=True):
"""
    displ_cart: cartesian displacement. (atom1_x, atom1_y, atom1_z, atom2_x, ...)
    masses: masses of atoms.
    scaled_positions: scaled positions of atoms.
qpoint: if phase needs to be added, qpoint must be given.
add_phase: whether to add phase to the eigenvectors.
"""
if add_phase and qpoint is None:
raise ValueError('qpoint must be given if adding phase is needed')
m = np.sqrt(np.kron(masses, [1, 1, 1]))
evec = displ_cart * m
if add_phase:
phase = [
np.exp(-2j * np.pi * np.dot(pos, qpoint))
for pos in scaled_positions
]
phase = np.kron(phase, [1, 1, 1])
evec *= phase
evec /= np.linalg.norm(evec)
return evec
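# Minimal usage sketch of displacement_cart_to_evec (illustrative numbers only,
# not from any real calculation): a two-atom cell at q = (0.5, 0.5, 0.5). Note
# the displacement is complex, like the phmode.displ_cart arrays used below.
#
#   displ = np.array([0.1, 0.0, 0.0, -0.05, 0.0, 0.0], dtype=complex)
#   masses = np.array([22.99, 35.45])                    # e.g. Na, Cl
#   spos = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
#   evec = displacement_cart_to_evec(displ, masses, spos,
#                                    qpoint=[0.5, 0.5, 0.5], add_phase=True)
#   # evec is mass-weighted, phase-corrected and normalized (norm ~= 1)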
def ixc_to_xc(ixc):
"""
translate ixc (positive: abinit. negative: libxc) to XC.
"""
xcdict = {
0: 'NO-XC',
1: 'LDA',
2: 'LDA-PZCA',
3: 'LDA-CA',
        4: 'LDA-Wigner',
5: 'LDA-Hedin-Lundqvist',
6: 'LDA-X-alpha',
7: 'LDA-PW92',
8: 'LDA-PW92-xonly',
9: 'LDA-PW92-xRPA',
11: 'GGA-PBE',
12: 'GGA-PBE-xonly',
14: 'GGA-revPBE',
15: 'GGA-RPBE',
16: 'GGA-HTCH93',
17: 'GGA-HTCH120',
23: 'GGA-WC',
40: 'Hartree-Fock',
41: 'GGA-PBE0',
42: 'GGA-PBE0-1/3',
-1009: 'LDA-PZCA',
-101130: 'GGA-PBE',
-106131: 'GGA-BLYP',
-106132: 'GGA-BP86',
-116133: 'GGA-PBEsol',
-118130: 'GGA-WC',
}
if ixc in xcdict:
return xcdict[ixc]
else:
return 'libxc_%s' % ixc
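# Example mappings (following the table above; codes not in the table fall
# through to the generic 'libxc_<code>' label):
#   ixc_to_xc(11)      -> 'GGA-PBE'
#   ixc_to_xc(-116133) -> 'GGA-PBEsol'
#   ixc_to_xc(9999)    -> 'libxc_9999'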
class mat_data():
def __init__(self,
name,
mag='PM',
description="None",
author='High Throughput Bot',
email='[email protected]',
is_verified=False,
verification_info="",
tags=[]
):
self._already_in_db = False
self.name = name
self.db_directory = None
self.all_data_directory = None
self.mag = mag
self.insert_time = None
self.update_time = None
self.log = ""
self.description = description
self.author = author
self.email = email
self.tags=tags
self.is_verified = is_verified
self.verification_info = verification_info
# properties in database. should be band | phonon
self.has_ebands = False
self.has_phonon = False
self.is_cubic_perovskite = True
self.cellpar = [0] * 6
self.cell = [0]*9
self.natoms = 0
self.chemical_symbols = []
self.masses = []
self.scaled_positions = []
self.ispin = 0
self.spinat = []
self.spgroup = 1
self.spgroup_name = 'P1'
self.ixc = 1
self.XC = 'PBEsol'
self.pp_type = 'ONCV'
self.pp_info = 'Not implemented yet.'
self.U_type=0
self.species=[]
self.zion=[]
self.U_l=[]
self.U_u=[]
self.U_j=[]
self.GSR_parameters = {}
self.energy = 0
self.efermi = 0
self.bandgap = 0
self.ebands = {}
self.kptrlatt=[]
self.usepaw=0
self.pawecutdg=0.0
self.nsppol=1
self.nspden=1
self.emacro = [0.0] * 9
self.becs = {}
self.elastic = []
self.nqpts = [1, 1, 1]
self.special_qpts = {}
self.phonon_mode_freqs = {}
self.phonon_mode_names = {}
self.phonon_mode_evecs = {}
self.phonon_mode_phdispl = {}
self.phonon_mode_freqs_LOTO = {}
self.phonon_mode_names_LOTO = {}
self.phonon_mode_evecs_LOTO = {}
self.phonon_mode_phdispl_LOTO = {}
def read_BAND_nc(self, fname, outputfile='Ebands.png', plot_ebands=True):
try:
band_file = abiopen(fname)
self.has_ebands = True
except Exception:
raise IOError("can't read %s" % fname)
self.efermi = band_file.energy_terms.e_fermie
gap = band_file.ebands.fundamental_gaps
if len(gap) != 0:
for g in gap:
self.gap = g.energy
self.is_direct_gap = g.is_direct
self.bandgap = self.gap
if plot_ebands:
fig, ax = plt.subplots()
fig = band_file.ebands.plot(ax=ax, show=False, ylims=[-7, 5])
fig.savefig(outputfile)
def read_OUT_nc(self, fname):
f = abiopen(fname)
self.invars = f.get_allvars()
for key in self.invars:
if isinstance(self.invars[key], np.ndarray):
self.invars[key] = tuple(self.invars[key])
self.spgroup = f.spgroup[0]
self.ixc = f.ixc[0]
self.XC = ixc_to_xc(self.ixc)
self.ecut = f.ecut[0]
self.species = [chemical_symbols[int(i)] for i in f.znucl]
if 'usepawu' in self.invars:
self.U_type= f.usepawu[0]
else:
self.U_type= 0
if self.U_type:
self.U_l = f.lpawu
self.U_u= [ x * Ha for x in f.upawu]
self.U_j= [ x* Ha for x in f.jpawu ]
#self.nband = f.nband[0]
self.kptrlatt = tuple(f.kptrlatt)
def print_scf_info(self):
        for key, val in self.invars.items():
print("%s : %s\n" % (key, val))
def read_GSR_nc(self, fname):
f = abiopen(fname)
self.energy = f.energy
self.stress_tensor = f.cart_stress_tensor # unit ?
self.forces = np.array(f.cart_forces) # unit eV/ang
def read_DDB(self,
fname=None,
do_label=True,
workdir=None,
phonon_output_dipdip='phonon_band_dipdip.png',
phonon_output_nodipdip='phonon_band_nodipdip.png'):
"""
read phonon related properties from DDB file.
"""
self.has_phonon = True
ddb = abiopen(fname)
self.ddb_header = ddb.header
self.atoms = ddb.structure.to_ase_atoms()
self.natoms = len(self.atoms)
self.cellpar = self.atoms.get_cell_lengths_and_angles()
self.cell=self.atoms.get_cell().flatten()
self.masses = self.atoms.get_masses()
self.scaled_positions = self.atoms.get_scaled_positions()
self.chemical_symbols = self.atoms.get_chemical_symbols()
self.spgroup_name = spglib.get_spacegroup(self.atoms,symprec=1e-4)
self.ixc = self.ddb_header['ixc']
self.XC = ixc_to_xc( self.ixc)
self.ispin = self.ddb_header['nsppol']
self.spinat = self.ddb_header['spinat']
self.nband = self.ddb_header['nband']
self.ecut = self.ddb_header['ecut']
self.tsmear =self.ddb_header['tsmear']
self.usepaw =self.ddb_header['usepaw']
        self.pawecutdg = self.ddb_header['pawecutdg']
self.nsppol = self.ddb_header['nsppol']
self.nspden= self.ddb_header['nspden']
self.species = [chemical_symbols[int(i)] for i in self.ddb_header['znucl']]
self.zion = [int(x) for x in self.ddb_header['zion']]
self.znucl = [int(x) for x in self.ddb_header['znucl']]
emacror, becsr = ddb.anaget_emacro_and_becs()
emacro = emacror[0].cartesian_tensor
becs_array = becsr.values
becs = {}
for i, bec in enumerate(becs_array):
becs[str(i)] = bec
nqpts = ddb._guess_ngqpt()
qpts = tuple(ddb.qpoints.frac_coords)
self.emacro = emacro
self.becs = becs
self.nqpts = nqpts
self.qpts = qpts
for qpt in qpts:
qpt = tuple(qpt)
m = ddb.anaget_phmodes_at_qpoint(qpt)
#self.results['phonon'][qpt]['frequencies'] = m.phfreqs
#self.results['phonon'][qpt][
# 'eigen_displacements'] = m.phdispl_cart
qpoints, evals, evecs, edisps = self.phonon_band(
ddb,
lo_to_splitting=False,
phonon_output_dipdip=phonon_output_dipdip,
phonon_output_nodipdip=phonon_output_nodipdip)
#for i in range(15):
# print(evecs[0, :, i])
self.special_qpts = {
'X': (0, 0.5, 0.0),
'M': (0.5, 0.5, 0),
'R': (0.5, 0.5, 0.5)
}
zb_modes = self.label_zone_boundary_all(
qpoints, evals, evecs, label=do_label)
for qname in self.special_qpts:
self.phonon_mode_freqs[qname] = zb_modes[qname][0]
self.phonon_mode_names[qname] = zb_modes[qname][1]
self.phonon_mode_evecs[qname] = zb_modes[qname][2]
Gmodes = self.label_Gamma_all(qpoints, evals, evecs, label=do_label)
self.phonon_mode_freqs['Gamma'] = Gmodes[0]
self.phonon_mode_names['Gamma'] = Gmodes[1]
self.phonon_mode_evecs['Gamma'] = Gmodes[2]
def get_zb_mode(self, qname, mode_name):
"""
        Return the branch indices and frequencies of the given mode name.
"""
ibranches = []
freqs = []
for imode, mode in enumerate(
self.results['phonon']['boundary_modes'][qname]):
freq, mname = mode
if mname == mode_name:
ibranches.append(imode)
freqs.append(freq)
return ibranches, freqs
def get_gamma_modes(self):
"""
return (Freqs, names, evecs)
"""
return self.phonon_mode_freqs['Gamma'], self.phonon_mode_names['Gamma'], self.phonon_mode_evecs['Gamma'],
def get_gamma_mode(self, mode_name):
"""
        Return the branch indices and frequencies of the given mode name.
"""
ibranches = []
freqs = []
for imode, mode in enumerate(zip(self.phonon_mode_freqs['Gamma'], self.phonon_mode_names['Gamma'])):
freq, mname = mode
if mname == mode_name:
ibranches.append(imode)
freqs.append(freq)
return ibranches, freqs
def label_Gamma_all(self, qpoints, evals, evecs, label=True):
Gamma_mode_freqs = []
Gamma_mode_names = []
Gamma_mode_evecs = []
for i, qpt in enumerate(qpoints):
if np.isclose(qpt, [0, 0, 0], rtol=1e-5, atol=1e-3).all():
evecq = evecs[i]
for j, evec in enumerate(evecq.T):
freq = evals[i][j]
if label:
mode = label_Gamma(
evec=evec, masses=self.atoms.get_masses())
Gamma_mode_names.append(mode)
else:
Gamma_mode_names.append('')
Gamma_mode_freqs.append(freq)
Gamma_mode_evecs.append(np.real(evec))
return Gamma_mode_freqs, Gamma_mode_names, Gamma_mode_evecs
if Gamma_mode_names == []:
print("Warning: No Gamma point found in qpoints.\n")
return Gamma_mode_freqs, Gamma_mode_names, Gamma_mode_evecs
def label_zone_boundary_all(self, qpoints, evals, evecs, label=True):
mode_dict = {}
qdict = {'X': (0, 0.5, 0.0), 'M': (0.5, 0.5, 0), 'R': (0.5, 0.5, 0.5)}
for i, qpt in enumerate(qpoints):
for qname in qdict:
if np.isclose(qpt, qdict[qname], rtol=1e-5, atol=1e-3).all():
mode_freqs = []
mode_names = []
mode_evecs = []
#print "===================================="
#print qname
evecq = evecs[i]
for j, evec in enumerate(evecq.T):
freq = evals[i][j]
mode_freqs.append(freq)
if label:
mode = label_zone_boundary(qname, evec=evec)
mode_names.append(mode)
else:
mode_names.append('')
mode_evecs.append(np.real(evec))
mode_dict[qname] = (mode_freqs, mode_names, mode_evecs)
return mode_dict
def phonon_band(self,
ddb,
lo_to_splitting=False,
workdir=None,
phonon_output_dipdip='phonon_band_dipdip.png',
phonon_output_nodipdip='phonon_band_nodipdip.png',
show=False):
atoms = ddb.structure.to_ase_atoms()
if workdir is not None:
            workdir_dip = os.path.join(workdir, 'phbst_dipdip')
#if os.path.exists(workdir_dip):
# os.system('rm -r %s' % workdir_dip)
else:
workdir_dip = None
phbst, phdos = ddb.anaget_phbst_and_phdos_files(
nqsmall=10,
asr=1,
chneut=1,
dipdip=1,
verbose=1,
lo_to_splitting=True,
anaddb_kwargs={'alphon': 1},
workdir=workdir_dip,
#qptbounds=kpath_bounds,
)
fig, ax = plt.subplots(nrows=1, ncols=1)
#plt.tight_layout(pad=2.19)
#plt.axis('tight')
plt.gcf().subplots_adjust(left=0.17)
ax.axhline(0, linestyle='--', color='black')
ax.set_title(self.name)
ticks, labels = phbst.phbands._make_ticks_and_labels(qlabels=None)
fig.axes[0].set_xlim([ticks[0],ticks[-1]])
fig = phbst.phbands.plot(
ax=ax,
units='cm-1',
match_bands=False,
linewidth=1.7,
color='blue',
show=False)
fig.axes[0].grid(False)
if show:
plt.show()
if phonon_output_dipdip:
fig.savefig(phonon_output_dipdip)
plt.close()
if workdir is not None:
workdir_nodip = os.path.join(workdir, 'phbst_nodipdip')
#if os.path.exists(workdir_dip):
# os.system('rm -r %s' % workdir_nodip)
else:
workdir_nodip = None
phbst, phdos = ddb.anaget_phbst_and_phdos_files(
nqsmall=5,
asr=1,
chneut=1,
dipdip=0,
verbose=1,
lo_to_splitting=False,
anaddb_kwargs={'alphon': 1},
workdir=workdir_nodip
#qptbounds=kpath_bounds,
)
fig, ax = plt.subplots(nrows=1, ncols=1)
#plt.tight_layout(pad=2.19)
#plt.axis('tight')
plt.gcf().subplots_adjust(left=0.17)
ax.axhline(0, linestyle='--', color='black')
        ax.set_title(self.name)
ticks, labels = phbst.phbands._make_ticks_and_labels(qlabels=None)
fig.axes[0].set_xlim([ticks[0],ticks[-1]])
fig = phbst.phbands.plot(
ax=ax,
units='cm-1',
match_bands=False,
linewidth=1.4,
color='blue',
show=False)
fig.axes[0].grid(False)
if show:
plt.show()
        if phonon_output_nodipdip:
fig.savefig(phonon_output_nodipdip)
plt.close()
qpoints = phbst.qpoints.frac_coords
nqpts = len(qpoints)
nbranch = 3 * len(atoms)
evals = np.zeros([nqpts, nbranch])
evecs = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
edisps = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
masses = atoms.get_masses()
scaled_positions = atoms.get_scaled_positions()
for iqpt, qpt in enumerate(qpoints):
for ibranch in range(nbranch):
phmode = phbst.get_phmode(qpt, ibranch)
                evals[iqpt, ibranch] = phmode.freq * 8065.6  # convert eV to cm^-1
evec = displacement_cart_to_evec(
phmode.displ_cart,
masses,
scaled_positions,
qpoint=qpt,
add_phase=False)
evecs[iqpt, :, ibranch] = evec / np.linalg.norm(evec)
edisps[iqpt, :, ibranch] = phmode.displ_cart
return qpoints, evals, evecs, edisps
def test():
    m = mat_data('test')  # a name is required; 'test' is just a placeholder
m.read_BAND_nc('./BAND_GSR.nc')
m.read_OUT_nc('./OUT.nc')
m.read_DDB('out_DDB')
#test()
| lgpl-3.0 |
pieleric/odemis | src/odemis/acq/stream/_base.py | 2 | 56609 | # -*- coding: utf-8 -*-
'''
Created on 25 Jun 2014
@author: Éric Piel
Copyright © 2014-2015 Éric Piel, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
from past.builtins import long
import collections
import functools
import gc
import logging
import math
import numbers
import numpy
from odemis import model
from odemis.model import (MD_POS, MD_PIXEL_SIZE, MD_ROTATION, MD_ACQ_DATE,
MD_SHEAR, VigilantAttribute, VigilantAttributeBase,
MD_POL_HORIZONTAL, MD_POL_VERTICAL, MD_POL_POSDIAG,
MD_POL_NEGDIAG, MD_POL_RHC, MD_POL_LHC, MD_POL_S0, MD_POL_S1, MD_POL_S2, MD_POL_S3,
MD_POL_DS0, MD_POL_DS1, MD_POL_DS2, MD_POL_DS3, MD_POL_EPHI, MD_POL_ETHETA, MD_POL_EX,
MD_POL_EY, MD_POL_EZ, MD_POL_DOP, MD_POL_DOLP, MD_POL_DOCP, MD_POL_UP, MD_POL_DS1N,
MD_POL_DS2N, MD_POL_DS3N, MD_POL_S1N, MD_POL_S2N, MD_POL_S3N, TINT_FIT_TO_RGB, TINT_RGB_AS_IS)
from odemis.util import img
import threading
import time
import weakref
import matplotlib
# TODO: move to odemis.acq (once it doesn't depend on odemis.acq.stream)
# Contains the base of the streams. Can be imported from other stream modules.
# to identify a ROI which must still be defined by the user
from odemis.util.transform import AffineTransform
UNDEFINED_ROI = (0, 0, 0, 0)
# use hardcode list of polarization positions necessary for polarimetry analysis
POL_POSITIONS = (MD_POL_HORIZONTAL, MD_POL_VERTICAL, MD_POL_POSDIAG,
MD_POL_NEGDIAG, MD_POL_RHC, MD_POL_LHC)
POL_POSITIONS_RESULTS = (MD_POL_DS0, MD_POL_DS1, MD_POL_DS2, MD_POL_DS3,
MD_POL_DS1N, MD_POL_DS2N, MD_POL_DS3N,
MD_POL_S0, MD_POL_S1, MD_POL_S2, MD_POL_S3,
MD_POL_S1N, MD_POL_S2N, MD_POL_S3N,
MD_POL_EPHI, MD_POL_ETHETA, MD_POL_EX, MD_POL_EY, MD_POL_EZ,
MD_POL_DOP, MD_POL_DOLP, MD_POL_DOCP, MD_POL_UP)
# user-friendly look-up dict for display in legend
POL_POSITIONS_2_DISPLAY = {MD_POL_HORIZONTAL: "Horizontal",
MD_POL_VERTICAL: "Vertical",
MD_POL_POSDIAG: "Positive diagonal",
MD_POL_NEGDIAG: "Negative diagonal",
MD_POL_RHC: "Right-handed circular",
MD_POL_LHC: "Left-handed circular",
MD_POL_DS0: "Stokes parameter detector plane S0",
MD_POL_DS1: "Stokes parameter detector plane S1",
MD_POL_DS2: "Stokes parameter detector plane S2",
MD_POL_DS3: "Stokes parameter detector plane S3",
MD_POL_DS1N: "Normalized stokes parameter detector plane S1",
MD_POL_DS2N: "Normalized stokes parameter detector plane S2",
MD_POL_DS3N: "Normalized stokes parameter detector plane S3",
MD_POL_S0: "Stokes parameter sample plane S0",
MD_POL_S1: "Stokes parameter sample plane S1",
MD_POL_S2: "Stokes parameter sample plane S2",
MD_POL_S3: "Stokes parameter sample plane S3",
MD_POL_S1N: "Normalized stokes parameter sample plane S1",
MD_POL_S2N: "Normalized stokes parameter sample plane S2",
MD_POL_S3N: "Normalized stokes parameter sample plane S3",
MD_POL_EPHI: u"Electrical field amplitude φ",
MD_POL_ETHETA: u"Electrical field amplitude θ",
MD_POL_EX: "Electrical field amplitude Ex",
MD_POL_EY: "Electrical field amplitude Ey",
MD_POL_EZ: "Electrical field amplitude Ez",
MD_POL_DOP: "Degree of polarization",
MD_POL_DOLP: "Degree of linear polarization",
MD_POL_DOCP: "Degree of circular polarization",
MD_POL_UP: "Degree of unpolarized light"
}
POL_MOVE_TIME = 6 # [s] extra time to move polarimetry hardware (value is very approximate)
class Stream(object):
""" A stream combines a Detector, its associated Dataflow and an Emitter.
It handles acquiring the data from the hardware and renders it as a RGB
image (with MD_PIXEL_SIZE and MD_POS copied)
This is an abstract class, unless the emitter doesn't need any configuration
(always on, with the right settings).
Note: If a Stream needs multiple Emitters, then this should be implemented
in a subclass of Stream.
Note: in general it's a bad idea to use .resolution as a local VA (because
    it's automatically modified by binning/scale and affected by .roi)
"""
# Minimum overhead time in seconds when acquiring an image
SETUP_OVERHEAD = 0.1
def __init__(self, name, detector, dataflow, emitter, focuser=None, opm=None,
hwdetvas=None, hwemtvas=None, detvas=None, emtvas=None, axis_map={},
raw=None, acq_type=None):
"""
name (string): user-friendly name of this stream
detector (Detector): the detector which has the dataflow
dataflow (Dataflow): the dataflow from which to get the data
emitter (Emitter): the emitter
opm (OpticalPathManager): the optical path manager
focuser (Actuator or None): an actuator with a 'z' axis that allows to change
the focus
hwdetvas (None or set of str): names of all detector hardware VAs to be controlled by this
Stream
hwemtvas (None or set of str): names of all emitter hardware VAs to be controlled by this
Stream
axis_map (None or dict of axis_name_in_stream(str) -> (str, Actuator)): names of all of the axes that
are connected to the stream and should be controlled
detvas (None or set of str): names of all the detector VigilantAttributes
(VAs) to be duplicated on the stream. They will be named .detOriginalName
emtvas (None or set of str): names of all the emitter VAs to be
duplicated on the stream. They will be named .emtOriginalName
raw (None or list of DataArrays or DataArrayShadow): raw data to be used
at initialisation. By default, it will contain no data.
acq_type (MD_AT_*): acquisition type associated with this stream (as in model._metadata)
"""
self.name = model.StringVA(name)
self.acquisitionType = model.VigilantAttribute(acq_type) # MD_ACQ_TYPE or None
# for identification of the acquisition type associated with the stream
# Hardware Components
self._detector = detector
self._emitter = emitter
self._focuser = focuser
self._opm = opm
# Dataflow (Live image stream with meta data)
# Note: A Detector can have multiple dataflows, so that's why a Stream
# has a separate attribute.
self._dataflow = dataflow
# TODO: We need to reorganise everything so that the
# image display is done via a dataflow (in a separate thread), instead
# of a VA.
self._im_needs_recompute = threading.Event()
self._init_thread()
# list of DataArray(Shadow) received and used to generate the image
# every time it's modified, image is also modified
if raw is None:
self.raw = []
else:
self.raw = raw
# initialize the projected tiles cache
self._projectedTilesCache = {}
# initialize the raw tiles cache
self._rawTilesCache = {}
# TODO: should better be based on a BufferedDataFlow: subscribing starts
# acquisition and sends (raw) data to whoever is interested. .get()
# returns the previous or next image acquired.
# indicating if stream has already been prepared
self._prepared = False
# TODO: should_update is a GUI stuff => move away from stream
        # should_update has no direct effect, it's just a flag to
        # indicate the user would like to have the stream updated (live)
self.should_update = model.BooleanVA(False)
# is_active set to True will keep the acquisition going on
self.is_active = model.BooleanVA(False, setter=self._is_active_setter)
# Leech to use during acquisition.
# Note: for now only some streams actually use them (MDStreams*)
self.leeches = []
# Hardware VA that the stream is directly linked to
self.hw_vas = {}
self.hw_vas.update(self._getVAs(detector, hwdetvas or set()))
self.hw_vas.update(self._getVAs(emitter, hwemtvas or set()))
# Duplicate VA if requested
self._hwvas = {} # str (name of the proxied VA) -> original Hw VA
self._hwvasetters = {} # str (name of the proxied VA) -> setter
self._lvaupdaters = {} # str (name of the proxied VA) -> listener
self._axisvaupdaters = {} # str (name of the axis VA) -> listener (functools.partial)
self._posupdaters = {} # Actuator -> listener (functools.partial)
self._det_vas = self._duplicateVAs(detector, "det", detvas or set())
self._emt_vas = self._duplicateVAs(emitter, "emt", emtvas or set())
self._axis_map = axis_map or {}
self._axis_vas = self._duplicateAxes(self._axis_map)
self._dRangeLock = threading.Lock()
self._drange = None # min/max data range, or None if unknown
self._drange_unreliable = True # if current values are a rough guess (based on detector)
# drange_raw is the smaller (less zoomed) image of an pyramidal image. It is used
# instead of the full image because it would be too slow or even impossible to read
# the full data from the image to the memory. It is also not the tiles from the tiled
# image, so the code for pyramidal and non-pyramidal images
# that reads drange_raw is the same.
# The drawback of not using the full image, is that some of the pixels are lost, so
# maybe the max/min of the smaller image is different from the min/max of the full image.
# And the histogram of both images will probably be a bit different also.
if raw and isinstance(raw[0], model.DataArrayShadow):
# if the image is pyramidal, use the smaller image
drange_raw = self._getMergedRawImage(raw[0], raw[0].maxzoom)
else:
drange_raw = None
# TODO: move to the DataProjection class
self.auto_bc = model.BooleanVA(True)
self.auto_bc.subscribe(self._onAutoBC)
# % of values considered outliers discarded in auto BC detection
        # Note: 1/256th is a nice value because on RGB, it means in degenerate
        # cases (like a flat histogram), you still lose only one value on each
# side.
self.auto_bc_outliers = model.FloatContinuous(100 / 256, range=(0, 40))
self.auto_bc_outliers.subscribe(self._onOutliers)
# The tint VA could be either:
# - a list tuple RGB value (for a tint) or
# - a matplotlib.colors.Colormap object for a custom color map
# - a string of value TINT_FIT_TO_RGB to indicate fit RGB color mapping
self.tint = model.VigilantAttribute((255, 255, 255), setter=self._setTint)
# Used if auto_bc is False
# min/max ratio of the whole intensity level which are mapped to
# black/white. Its range is ._drange (will be updated by _updateDRange)
self.intensityRange = model.TupleContinuous((0, 0),
range=((0, 0), (1, 1)),
cls=(int, long, float),
setter=self._setIntensityRange)
# Make it so that the value gets clipped when its range is updated and
# the value is outside of it.
self.intensityRange.clip_on_range = True
self._updateDRange(drange_raw) # sets intensityRange
self._init_projection_vas()
# Histogram of the current image _or_ slightly older image.
# Note it's an ndarray. Use .tolist() to get a python list.
self.histogram = model.VigilantAttribute(numpy.empty(0), readonly=True)
self.histogram._full_hist = numpy.ndarray(0) # for finding the outliers
self.histogram._edges = None
# Tuple of (int, str) or (None, None): loglevel and message
self.status = model.VigilantAttribute((None, None), readonly=True)
# Background data, to be subtracted from the acquisition data before
# projection. It should be the same shape and dtype as the acquisition
        # data, otherwise no subtraction will be performed. If None, no
        # subtraction is applied.
self.background = model.VigilantAttribute(None, setter=self._setBackground)
self.background.subscribe(self._onBackground)
# if there is already some data, update image with it
# TODO: have this done by the child class, if needed.
if self.raw:
self._updateHistogram(drange_raw)
self._onNewData(None, self.raw[0])
def _init_projection_vas(self):
""" Initialize the VAs related with image projection
"""
# DataArray or None: RGB projection of the raw data
self.image = model.VigilantAttribute(None)
# Don't call at init, so don't set metadata if default value
self.tint.subscribe(self.onTint)
self.intensityRange.subscribe(self._onIntensityRange)
def _init_thread(self, period=0.1):
""" Initialize the thread that updates the image
"""
self._imthread = threading.Thread(target=self._image_thread,
args=(weakref.ref(self), period),
name="Image computation of %s" % self.name.value)
self._imthread.daemon = True
self._imthread.start()
# No __del__: subscription should be automatically stopped when the object
# disappears, and the user should stop the update first anyway.
@property
def emitter(self):
return self._emitter
@property
def detector(self):
return self._detector
@property
def focuser(self):
return self._focuser
@property
def det_vas(self):
return self._det_vas
@property
def emt_vas(self):
return self._emt_vas
@property
def axis_vas(self):
return self._axis_vas
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.name.value)
def _getVAs(self, comp, va_names):
if not isinstance(va_names, set):
raise ValueError(u"vas should be a set but got %s" % (va_names,))
vas = {}
for vaname in va_names:
try:
va = getattr(comp, vaname)
except AttributeError:
                raise LookupError(u"Component %s has no attribute %s" %
(comp.name, vaname))
if not isinstance(va, VigilantAttributeBase):
raise LookupError(u"Component %s attribute %s is not a VA: %s" %
(comp.name, vaname, va.__class__.__name__))
setattr(self, vaname, va)
vas[vaname] = va
return vas
def _duplicateVAs(self, comp, prefix, va_names):
""" Duplicate all the given VAs of the given component and rename them with the prefix
:param comp: (Component) the component on which to find the VAs
:param prefix: (str) prefix to put before the name of each VA
:param va_names: (set of str) names of all the VAs
:raise:
LookupError: if the component doesn't have a listed VA
:return:
Dictionary (str -> VA): original va name -> duplicated va
"""
if not isinstance(va_names, set):
raise ValueError("vas should be a set but got %s" % (va_names,))
dup_vas = {}
for vaname in va_names:
# Skip the duplication if the VA is already linked as a direct hardware VA
if vaname in self.hw_vas:
continue
try:
va = getattr(comp, vaname)
except AttributeError:
                raise LookupError(u"Component %s has no attribute %s" %
(comp.name, vaname))
if not isinstance(va, VigilantAttributeBase):
raise LookupError(u"Component %s attribute %s is not a VA: %s" %
(comp.name, vaname, va.__class__.__name__))
# TODO: add a setter/listener that will automatically synchronise the VA value
# as long as the stream is active
vasetter = functools.partial(self._va_sync_setter, va)
dupva = self._duplicateVA(va, setter=vasetter)
logging.debug(u"Duplicated VA '%s' with value %s", vaname, va.value)
# Collect the vas, so we can return them at the end of the method
dup_vas[vaname] = dupva
# Convert from originalName to prefixOriginalName
newname = prefix + vaname[0].upper() + vaname[1:]
setattr(self, newname, dupva)
# Keep the link between the new VA and the original VA so they can be synchronised
self._hwvas[newname] = va
# Keep setters, mostly to not have them dereferenced
self._hwvasetters[newname] = vasetter
return dup_vas
def _va_sync_setter(self, origva, v):
"""
Setter for proxied VAs
origva (VA): the original va
v: the new value
return: the real new value (as accepted by the original VA)
"""
if self.is_active.value: # only synchronised when the stream is active
logging.debug(u"Updating VA (%s) to %s", origva, v)
origva.value = v
return origva.value
else:
logging.debug(u"Not updating VA (%s) to %s", origva, v)
return v
def _va_sync_from_hw(self, lva, v):
"""
Called when the Hw VA is modified, to update the local VA
lva (VA): the local VA
v: the new value
"""
# Don't use the setter, directly put the value as-is. That avoids the
# setter to again set the Hw VA, and ensure we always accept the Hw
# value
logging.debug(u"Updating local VA (%s) to %s", lva, v)
if lva._value != v:
lva._value = v # TODO: works with ListVA?
lva.notify(v)
def _duplicateAxis(self, axis_name, actuator):
"""
        Create a new VigilantAttribute (VA) for the given axis, which imitates its behaviour.
axis_name (str): the name of the axis to define
actuator (Actuator): the actuator
return (VigilantAttribute): new VA
"""
axis = actuator.axes[axis_name]
pos = actuator.position.value[axis_name]
if hasattr(axis, "choices"):
return model.VAEnumerated(pos, choices=axis.choices, unit=axis.unit)
elif hasattr(axis, "range"):
# Continuous
return model.FloatContinuous(pos, range=axis.range, unit=axis.unit)
else:
raise ValueError("Invalid axis type")
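    # Usage sketch (illustrative names; assumes "filter" is an enumerated axis
    # of the given actuator):
    #   va = self._duplicateAxis("filter", filter_wheel)
    #   va.value    # current hardware position
    #   va.choices  # same choices as filter_wheel.axes["filter"].choices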
def _duplicateAxes(self, axis_map):
"""
Duplicate all of the axes passed to the stream in local Vigilant Attributes
axis_map (dict of axis_name -> Actuator): map of an axis name to an Actuator component
returns (dict str -> VA): axis_name -> new VA.
"""
# Add axis position VA's to the list of hardware VA's
axis_vas = {} # dict of axis_name to duplicated position VA
for va_name, (axis_name, actuator) in axis_map.items():
va = self._duplicateAxis(axis_name, actuator)
axis_vas[va_name] = va
# add attributes to stream
setattr(self, "axis" + va_name[0].upper() + va_name[1:], va)
return axis_vas
# TODO: move to odemis.util ?
def _duplicateVA(self, va, setter=None):
"""
Create a new VA, with same behaviour as the given VA
va (VigilantAttribute): VA to duplicate
setter (None or callable): the setter of the VA
return (VigilantAttribute): new VA
"""
# Find out the type of the VA (without using the exact class, to work
# even if it's proxied)
kwargs = {}
if isinstance(va, (model.ListVA, model.ListVAProxy)):
vacls = model.ListVA
elif hasattr(va, "choices") and isinstance(va.choices, collections.Iterable):
# Enumerated
vacls = model.VAEnumerated
kwargs["choices"] = va.choices
elif hasattr(va, "range") and isinstance(va.range, collections.Iterable):
# Continuous
# TODO: TupleContinuous vs FloatContinuous vs... use range type?
r0 = va.range[0]
if isinstance(r0, tuple):
vacls = model.TupleContinuous
if isinstance(r0[0], numbers.Real):
kwargs["cls"] = numbers.Real # accept _any_ number
# otherwise, the VA will just pick the class from the value
elif isinstance(r0, numbers.Real):
# TODO: distinguish model.IntContinuous, how?
vacls = model.FloatContinuous
else:
raise NotImplementedError(u"Doesn't know how to duplicate VA %s"
% (va,))
kwargs["range"] = va.range
else:
# TODO: FloatVA vs IntVA vs StringVA vs BooleanVA vs TupleVA based on value type? hard to do
vacls = VigilantAttribute
newva = vacls(va.value, readonly=va.readonly, unit=va.unit, setter=setter, **kwargs)
return newva
# Order in which VAs should be set to ensure the values are kept as-is.
# This should be the behaviour of the hardware component... but the driver
# might be buggy, so beware!
VA_ORDER = ("Binning", "Scale", "Resolution", "Translation", "Rotation", "DwellTime",
"TimeRange", "StreakMode", "MCPGain")
def _index_in_va_order(self, va_entry):
"""
return the position of the VA name in VA_ORDER
va_entry (tuple): first element must be the name of the VA
return (int)
"""
name = va_entry[0][3:] # strip "det" or "emt"
try:
return self.VA_ORDER.index(name)
except ValueError: # VA name is not listed => put last
return len(self.VA_ORDER) + 1
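    # For example (a sketch; "res_va"/"bin_va" are illustrative objects):
    # sorting duplicated-VA entries with this key applies "Binning" before
    # "Resolution", following VA_ORDER above:
    #   entries = [("detResolution", res_va), ("detBinning", bin_va)]
    #   entries.sort(key=self._index_in_va_order)
    #   # -> [("detBinning", bin_va), ("detResolution", res_va)]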
# TODO: rename to applyHwVAs and never call unlinkHwVAs?
def _linkHwVAs(self):
"""
Apply the current value of each duplicated hardware VAs from the stream
to the hardware component.
If the hardware value is not accepted as-is, the value of the local
VA will be set to the hardware value.
"""
if self._lvaupdaters:
logging.warning(u"Going to link Hw VAs, while already linked")
# Make sure the VAs are set in the right order to keep values
hwvas = list(self._hwvas.items()) # must be a list
hwvas.sort(key=self._index_in_va_order)
for vaname, hwva in hwvas:
if hwva.readonly:
continue
lva = getattr(self, vaname)
try:
hwva.value = lva.value
except Exception:
logging.debug(u"Failed to set VA %s to value %s on hardware",
vaname, lva.value)
# Immediately read the VAs back, to read the actual values accepted by the hardware
for vaname, hwva in hwvas:
if hwva.readonly:
continue
lva = getattr(self, vaname)
try:
lva.value = hwva.value
except Exception:
logging.debug(u"Failed to update VA %s to value %s from hardware",
vaname, hwva.value)
# Hack: There shouldn't be a resolution local VA, but for now there is.
# In order to set it to some correct value, we read back from the hardware.
if vaname[3:] == "Resolution":
updater = functools.partial(self._va_sync_from_hw, lva)
self._lvaupdaters[vaname] = updater
hwva.subscribe(updater)
# Note: for now disabled. Normally, we don't need to set the VA value
# via the hardware VA, and it causes confusion in some cases if the
# hardware settings are changed temporarily for some reason.
# make sure the local VA value is synchronised
# for vaname, hwva in self._hwvas.items():
# if hwva.readonly:
# continue
# lva = getattr(self, vaname)
# updater = functools.partial(self._va_sync_from_hw, lva)
# self._lvaupdaters[vaname] = updater
# hwva.subscribe(updater, init=True)
def _unlinkHwVAs(self):
for vaname, updater in list(self._lvaupdaters.items()):
hwva = self._hwvas[vaname]
hwva.unsubscribe(updater)
del self._lvaupdaters[vaname]
def _getEmitterVA(self, vaname):
"""
Give the VA for controlling the setting of the emitter, either the local
one, or if it doesn't exist, directly the hardware one.
vaname (str): name of the VA as on the hardware
return (VigilantAttribute): the local VA or the Hw VA
raises
AttributeError: if VA doesn't exist
"""
lname = "emt" + vaname[0].upper() + vaname[1:]
try:
return getattr(self, lname)
except AttributeError:
hwva = getattr(self._emitter, vaname)
if not isinstance(hwva, VigilantAttributeBase):
                raise AttributeError(u"Emitter has no VA %s" % (vaname,))
return hwva
def _getDetectorVA(self, vaname):
"""
Give the VA for controlling the setting of the detector, either the local
one, or if it doesn't exist, directly the hardware one.
vaname (str): name of the VA as on the hardware
return (VigilantAttribute): the local VA or the Hw VA
raises
AttributeError: if VA doesn't exist
"""
lname = "det" + vaname[0].upper() + vaname[1:]
try:
return getattr(self, lname)
except AttributeError:
hwva = getattr(self._detector, vaname)
if not isinstance(hwva, VigilantAttributeBase):
                raise AttributeError(u"Detector has no VA %s" % (vaname,))
return hwva
    def _linkHwAxes(self):
        """
        Link the axes, which are defined as local VAs,
        to their respective hardware component values. Blocking function.
        If local axis VigilantAttributes are specified, write their values
        to the real hardware.
"""
if hasattr(self, "_axis_vas"):
moving_axes = []
moves = {} # Actuator -> move {axis -> value}
for va_name, (axis_name, actuator) in self._axis_map.items():
va = self._axis_vas[va_name]
pos = va.value
moves.setdefault(actuator, {})[axis_name] = pos
logging.info("Moving actuator %s axis %s to position %s.", actuator.name, axis_name, pos)
# subscribe to update the axis when the stream plays
ax_updater = functools.partial(self._update_linked_axis, va_name)
self._axisvaupdaters[va_name] = ax_updater
va.subscribe(ax_updater)
# coordinate the moves in sequence, one per actuator
for act, mv in moves.items():
# subscribe to the position VA's of the actuators
pos_updater = functools.partial(self._update_linked_position, act)
self._posupdaters[act] = pos_updater
act.position.subscribe(pos_updater)
try:
f = act.moveAbs(mv)
f.add_done_callback(self._onAxisMoveDone)
moving_axes.append(f)
except Exception:
logging.exception("Failed to move actuator %s axis %s.", act.name, mv)
for f in moving_axes:
try:
f.result()
except Exception:
logging.exception("Failed to move axis.")
def _onAxisMoveDone(self, f):
"""
Callback method, which checks that the move is actually finished.
:param f: (future)
"""
try:
f.result()
except Exception:
logging.exception("Failed to move axis.")
def _update_linked_position(self, act, pos):
""" Subscriber called when the actuator position changes.
        Update the linked axis VAs with the new position value.
"""
if not self.is_active.value:
return
for axis_name, axpos in pos.items():
for va_name, (real_axis_name, actuator) in self._axis_map.items():
if axis_name == real_axis_name and act == actuator:
va = self._axis_vas[va_name]
break
else:
# some axes might not necessarily be in the axis map. Skip them
continue
# before updating va
va.unsubscribe(self._axisvaupdaters[va_name])
# update va
va.value = axpos
logging.info("Updating local axis %s to position %s", va_name, axpos)
va.subscribe(self._axisvaupdaters[va_name])
return pos
def _update_linked_axis(self, va_name, pos):
""" Update the value of a linked hardware axis VA
when the stream is active
"""
if not self.is_active.value:
return
try:
real_axis_name, act = self._axis_map[va_name]
logging.info("Moving actuator %s axis %s to position %s.", act.name, real_axis_name, pos)
f = act.moveAbs({real_axis_name: pos})
            # TODO: ideally, it would block, so that the caller knows when the move is complete.
            # However, this requires that the GUI calls this function in a separate thread.
# f.result()
except Exception:
logging.exception("Failed to move axis.")
return pos
def _unlinkHwAxes(self):
"""
Unlink the axes to the hardware components
"""
if hasattr(self, "_axis_vas"):
for va_name, updater in list(self._axisvaupdaters.items()):
va = self._axis_vas[va_name]
va.unsubscribe(updater)
del self._axisvaupdaters[va_name]
for actuator, updater in list(self._posupdaters.items()):
actuator.position.unsubscribe(updater)
del self._posupdaters[actuator]
def prepare(self):
"""
Take care of any action required to be taken before the stream becomes
active.
Note: it's not necessary to call it before a stream is set to active.
If it was not called, this function will automatically be called when
starting the stream.
returns (model.ProgressiveFuture): Progress of preparation
"""
if self.is_active.value:
logging.warning("Prepare of stream %s called while already active", self.name.value)
# TODO: raise an error
return self._prepare()
def _prepare(self):
"""
Take care of any action required to be taken before the stream becomes
active.
returns (model.ProgressiveFuture): Progress of preparation
"""
logging.debug(u"Preparing stream %s ...", self.name.value)
# actually indicate that preparation has been triggered, don't wait for
# it to be completed
self._prepared = True
return self._prepare_opm()
def _prepare_opm(self):
if self._opm is None:
return model.InstantaneousFuture()
logging.debug(u"Setting optical path for %s", self.name.value)
f = self._opm.setPath(self)
return f
def estimateAcquisitionTime(self):
""" Estimate the time it will take to acquire one image with the current
settings of the detector and emitter.
returns (float): approximate time in seconds that acquisition will take
"""
# This default implementation returns the shortest possible time, taking
# into account a minimum overhead. (As in, acquisition will never take
# less than 0.1 seconds)
return self.SETUP_OVERHEAD
def _setStatus(self, level, message=None):
"""
Set the status
level (0<=int or None): the bigger the more important, same interpretation
as logging.
message (str or None): the status message
"""
if level is None and message is not None:
logging.warning(u"Setting status with no level and message %s", message)
self.status._value = (level, message)
self.status.notify(self.status.value)
def onTint(self, value):
if self.raw:
raw = self.raw[0]
else:
raw = None
if raw is not None:
raw.metadata[model.MD_USER_TINT] = img.tint_to_md_format(value)
self._shouldUpdateImage()
def _is_active_setter(self, active):
"""
Called just before the Stream becomes (in)active
"""
# Note: the setter can be called even if the value don't change
if self.is_active.value != active:
if active:
# This is done in a setter to ensure that as soon as is_active is
# True, all the HwVAs are already synchronised, and this avoids
# the VA setter to catch again the change
self._linkHwVAs()
self._linkHwAxes()
# TODO: create generic fct linkHWAxes and call here
else:
self._unlinkHwVAs()
self._unlinkHwAxes()
return active
def _updateDRange(self, data=None):
"""
Update the ._drange, with whatever data is known so far.
data (None or DataArray): data on which to base the detection. If None,
it will try to use .raw, and if there is nothing, will just use the
detector information.
"""
# Note: it feels like live and static streams could have a separate
# version, but detecting a stream has no detector is really not costly
# and static stream can still have changing drange (eg, when picking a
# different 4th or 5th dimension). => just a generic version that tries
# to handle all the cases.
# Note: Add a lock to avoid calling this fct simultaneously. When starting
# Odemis, the image thread and the histogram thread call this method.
# It happened sometimes that self._drange_unreliable was already updated, while
# self._drange was not updated yet. This resulted in incorrectly updated min and max
# values for drange calc by the second thread as using the new
# self._drange_unreliable but the old self._drange values.
with self._dRangeLock:
if data is None and self.raw:
data = self.raw[0]
if isinstance(data, model.DataArrayShadow):
# if the image is pyramidal, use the smaller image
data = self._getMergedRawImage(data, data.maxzoom)
# 2 types of drange management:
# * dtype is int -> follow MD_BPP/shape/dtype.max, and if too wide use data.max
# * dtype is float -> data.max
if data is not None:
if data.dtype.kind in "biu":
try:
depth = 2 ** data.metadata[model.MD_BPP]
if depth <= 1:
logging.warning("Data reports a BPP of %d", data.metadata[model.MD_BPP])
raise ValueError()
drange = (0, depth - 1)
except (KeyError, ValueError):
drange = self._guessDRangeFromDetector()
if drange is None:
idt = numpy.iinfo(data.dtype)
drange = (idt.min, idt.max)
elif data.dtype.kind == "i": # shift the range for signed data
depth = drange[1] + 1
drange = (-depth // 2, depth // 2 - 1)
# If range is too big to be used as is => look really at the data
if (drange[1] - drange[0] > 4095 and
(self._drange is None or
self._drange_unreliable or
self._drange[1] - self._drange[0] < drange[1] - drange[0])):
mn = int(data.view(numpy.ndarray).min())
mx = int(data.view(numpy.ndarray).max())
if self._drange is not None and not self._drange_unreliable:
# Only allow the range to expand, to avoid it constantly moving
mn = min(mn, self._drange[0])
mx = max(mx, self._drange[1])
# Try to find "round" values. Either:
# * mn = 0, mx = max rounded to next power of 2 -1
# * mn = min, width = width rounded to next power of 2
# => pick the one which gives the smallest width
diff = max(2, mx - mn + 1)
diffrd = 2 ** int(math.ceil(math.log(diff, 2))) # next power of 2
width0 = max(2, mx + 1)
width0rd = 2 ** int(math.ceil(math.log(width0, 2))) # next power of 2
if diffrd < width0rd:
drange = (mn, mn + diffrd - 1)
else:
drange = (0, width0rd - 1)
else: # float
# cast to ndarray to ensure a scalar (instead of a DataArray)
drange = (data.view(numpy.ndarray).min(),
data.view(numpy.ndarray).max())
if self._drange is not None and not self._drange_unreliable:
drange = (min(drange[0], self._drange[0]),
max(drange[1], self._drange[1]))
if drange:
self._drange_unreliable = False
else:
# no data, give a large estimate based on the detector
drange = self._guessDRangeFromDetector()
self._drange_unreliable = True
if drange:
# This VA will clip its own value if it is out of range
self.intensityRange.range = ((drange[0], drange[0]),
(drange[1], drange[1]))
self._drange = drange
def _guessDRangeFromDetector(self):
try:
# If the detector has .bpp, use this info
try:
depth = 2 ** self._getDetectorVA("bpp").value
except AttributeError:
# The last element of the shape indicates the bit depth, which
# is used for brightness/contrast adjustment.
depth = self._detector.shape[-1]
if depth <= 1:
                logging.warning("Detector %s reports a depth of %d",
self._detector.name, depth)
raise ValueError()
drange = (0, depth - 1)
except (AttributeError, IndexError, ValueError):
drange = None
return drange
def _getDisplayIRange(self):
"""
return the min/max values to display. It also updates the intensityRange
VA if needed.
return (number, number): the min/max values to map to black/white. It is
the same type as the data type.
"""
if self.auto_bc.value:
# The histogram might be slightly old, but not too much
# The main thing to pay attention is that the data range is identical
if self.histogram._edges != self._drange:
self._updateHistogram()
irange = sorted(self.intensityRange.value)
return irange
def _find_metadata(self, md):
"""
Find the useful metadata for a 2D spatial projection from the metadata
of a raw image
return (dict MD_* -> value)
"""
md = dict(md) # duplicate to not modify the original metadata
img.mergeMetadata(md) # applies correction metadata
try:
pos = md[MD_POS]
except KeyError:
# Note: this log message is disabled to prevent log flooding
# logging.warning("Position of image unknown")
pos = (0, 0)
try:
pxs = md[MD_PIXEL_SIZE]
except KeyError:
# Hopefully it'll be within the same magnitude, and otherwise
# default to small value so that it easily fits in the FoV.
spxs = md.get(model.MD_SENSOR_PIXEL_SIZE, (100e-9, 100e-9))
binning = md.get(model.MD_BINNING, (1, 1))
pxs = spxs[0] / binning[0], spxs[1] / binning[1]
# Note: this log message is disabled to prevent log flooding
# msg = "Pixel density of image unknown, using sensor size"
# logging.warning(msg)
rot = md.get(MD_ROTATION, 0)
she = md.get(MD_SHEAR, 0)
new_md = {MD_PIXEL_SIZE: pxs,
MD_POS: pos,
MD_ROTATION: rot,
MD_SHEAR: she}
# Not necessary, but handy to debug latency problems
if MD_ACQ_DATE in md:
new_md[MD_ACQ_DATE] = md[MD_ACQ_DATE]
return new_md
def _projectXY2RGB(self, data, tint=(255, 255, 255)):
"""
Project a 2D spatial DataArray into a RGB representation
data (DataArray): 2D DataArray
tint ((int, int, int)): colouration of the image, in RGB.
return (DataArray): 3D DataArray
"""
irange = self._getDisplayIRange()
rgbim = img.DataArray2RGB(data, irange, tint)
rgbim.flags.writeable = False
# Commented to prevent log flooding
# if model.MD_ACQ_DATE in data.metadata:
# logging.debug("Computed RGB projection %g s after acquisition",
# time.time() - data.metadata[model.MD_ACQ_DATE])
md = self._find_metadata(data.metadata)
md[model.MD_DIMS] = "YXC" # RGB format
return model.DataArray(rgbim, md)
def _shouldUpdateImage(self):
"""
Ensures that the image VA will be updated in the "near future".
"""
# If the previous request is still being processed, the event
# synchronization allows to delay it (without accumulation).
self._im_needs_recompute.set()
@staticmethod
def _image_thread(wstream, period=0.1):
""" Called as a separate thread, and recomputes the image whenever it receives an event
asking for it.
Args:
wstream (Weakref to a Stream): the stream to follow
            period (float > 0): minimum time in seconds between two image updates
"""
try:
stream = wstream()
name = stream.name.value
im_needs_recompute = stream._im_needs_recompute
# Only hold a weakref to allow the stream to be garbage collected
# On GC, trigger im_needs_recompute so that the thread can end too
wstream = weakref.ref(stream, lambda o: im_needs_recompute.set())
while True:
del stream
im_needs_recompute.wait() # wait until a new image is available
stream = wstream()
if stream is None:
logging.debug("Stream %s disappeared so ending image update thread", name)
break
                tnext = time.time() + period  # run at most once every "period" seconds (default 0.1 s, i.e. 10 Hz)
im_needs_recompute.clear()
stream._updateImage()
tnow = time.time()
# sleep a bit to avoid refreshing too fast
tsleep = tnext - tnow
if tsleep > 0.0001:
time.sleep(tsleep)
except Exception:
logging.exception("Image update thread failed")
gc.collect()
def _getMergedRawImage(self, das, z):
"""
Returns the entire raw data of DataArrayShadow at a given zoom level
das (DataArrayShadow): shadow of the raw data
z (int): Zoom level index
return (DataArray): The merged image
"""
# calculates the size of the merged image
width_zoomed = das.shape[1] / (2 ** z)
height_zoomed = das.shape[0] / (2 ** z)
# calculates the number of tiles on both axes
num_tiles_x = int(math.ceil(width_zoomed / das.tile_shape[1]))
num_tiles_y = int(math.ceil(height_zoomed / das.tile_shape[0]))
tiles = []
for x in range(num_tiles_x):
tiles_column = []
for y in range(num_tiles_y):
tile = das.getTile(x, y, z)
tiles_column.append(tile)
tiles.append(tiles_column)
return img.mergeTiles(tiles)
def _updateImage(self):
""" Recomputes the image with all the raw data available
"""
if not self.raw:
return
try:
if not isinstance(self.raw, list):
raise AttributeError(".raw must be a list of DA/DAS")
data = self.raw[0]
bkg = self.background.value
if bkg is not None:
try:
data = img.Subtract(data, bkg)
except Exception as ex:
logging.info("Failed to subtract background data: %s", ex)
dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
ci = dims.find("C") # -1 if not found
# is RGB
if dims in ("CYX", "YXC") and data.shape[ci] in (3, 4):
rgbim = img.ensureYXC(data)
rgbim.flags.writeable = False
# merge and ensures all the needed metadata is there
rgbim.metadata = self._find_metadata(rgbim.metadata)
rgbim.metadata[model.MD_DIMS] = "YXC" # RGB format
self.image.value = rgbim
else: # is grayscale
if data.ndim != 2:
data = img.ensure2DImage(data) # Remove extra dimensions (of length 1)
self.image.value = self._projectXY2RGB(data, self.tint.value)
except Exception:
logging.exception("Updating %s %s image", self.__class__.__name__, self.name.value)
# Setter and updater of background don't do much, but allow to be overridden
def _setBackground(self, data):
"""Called when the background is about to be changed"""
return data
def _onBackground(self, data):
"""Called after the background has changed"""
self._shouldUpdateImage()
def _onAutoBC(self, enabled):
# if changing to auto: B/C might be different from the manual values
if enabled:
self._recomputeIntensityRange()
def _onOutliers(self, outliers):
if self.auto_bc.value:
self._recomputeIntensityRange()
def _recomputeIntensityRange(self):
if len(self.histogram._full_hist) == 0: # No histogram yet
return
irange = img.findOptimalRange(self.histogram._full_hist,
self.histogram._edges,
self.auto_bc_outliers.value / 100)
# clip is needed for some corner cases with floats
irange = self.intensityRange.clip(irange)
self.intensityRange.value = irange
def _setIntensityRange(self, irange):
# Not much to do, but force int if the data is int
if self._drange and isinstance(self._drange[1], numbers.Integral):
if not all(isinstance(v, numbers.Integral) for v in irange):
# Round down/up
irange = int(irange[0]), int(math.ceil(irange[1]))
return irange
def _setTint(self, tint):
# The tint VA could be either:
# - a list tuple RGB value (for a tint) or
# - a matplotlib.colors.Colormap object for a custom color map or
# - a string of value TINT_FIT_TO_RGB to indicate fit RGB color mapping or
# - a string of value TINT_RGB_AS_IS that indicates no tint. Will be converted to a black tint
# Enforce this setting
if isinstance(tint, tuple):
# RGB tuple - enforce len of 3
if len(tint) != 3:
raise ValueError("RGB Value for tint should be of length 3")
return tint
elif isinstance(tint, list):
# convert to tuple of len 3
if len(tint) != 3:
raise ValueError("RGB Value for tint should be of length 3")
return tuple(tint)
elif isinstance(tint, matplotlib.colors.Colormap):
return tint
elif tint == TINT_FIT_TO_RGB:
return tint
elif tint == TINT_RGB_AS_IS:
return (255, 255, 255)
else:
raise ValueError("Invalid value for tint VA")
def _onIntensityRange(self, irange):
self._shouldUpdateImage()
def _updateHistogram(self, data=None):
"""
data (DataArray): the raw data to use, default to .raw[0] - background
(if present).
If will also update the intensityRange if auto_bc is enabled.
"""
# Compute histogram and compact version
if data is None:
if not self.raw:
logging.debug("Not computing histogram as .raw is empty")
return
data = self.raw[0]
if isinstance(data, model.DataArrayShadow):
# Pyramidal => use the smallest version
data = self._getMergedRawImage(data, data.maxzoom)
# We only do background subtraction when automatically selecting raw
bkg = self.background.value
if bkg is not None:
try:
data = img.Subtract(data, bkg)
except Exception as ex:
logging.info("Failed to subtract background when computing histogram: %s", ex)
# Depth can change at each image (depends on hardware settings)
self._updateDRange(data)
# Initially, _drange might be None, in which case it will be guessed
hist, edges = img.histogram(data, irange=self._drange)
if hist.size > 256:
chist = img.compactHistogram(hist, 256)
else:
chist = hist
self.histogram._full_hist = hist
self.histogram._edges = edges
# First update the value, before the intensityRange subscribers are called...
self.histogram._value = chist
if self.auto_bc.value:
self._recomputeIntensityRange()
# Notify last, so intensityRange is correct when subscribers get the new histogram
self.histogram.notify(chist)
def _onNewData(self, dataflow, data):
# Commented out to prevent log flooding
# if model.MD_ACQ_DATE in data.metadata:
# logging.debug("Receive raw %g s after acquisition",
# time.time() - data.metadata[model.MD_ACQ_DATE])
if isinstance(self.raw, list):
if not self.raw:
self.raw.append(data)
else:
self.raw[0] = data
else:
logging.error("%s .raw is not a list, so can store new data", self)
self._shouldUpdateImage()
def getPixelCoordinates(self, p_pos):
"""
Translate physical coordinates into data pixel coordinates
Args:
p_pos(tuple float, float): the position in physical coordinates
Returns(tuple int, int or None): the position in pixel coordinates or None if it's outside of the image
"""
if not self.raw:
raise LookupError("Stream has no data")
raw = self.raw[0]
md = self._find_metadata(raw.metadata)
pxs = md.get(model.MD_PIXEL_SIZE, (1e-6, 1e-6))
rotation = md.get(model.MD_ROTATION, 0)
shear = md.get(model.MD_SHEAR, 0)
translation = md.get(model.MD_POS, (0, 0))
size = raw.shape[-1], raw.shape[-2]
# The "shear" argument is not passed in the "AffineTransform" because the formula of the AffineTransform
# uses a horizontal shear, while MD_SHEAR defines a vertical shear. The shear is applied afterwards,
# by updating the transformation matrix.
tform = AffineTransform(rotation=rotation, scale=pxs, translation=translation)
L = numpy.array([(1, 0), (-shear, 1)])
tform.transformation_matrix = numpy.dot(tform.transformation_matrix, L)
pixel_pos_c = tform.inverse()(p_pos)
# a "-" is used for the y coordinate because Y axis has the opposite direction in physical coordinates
pixel_pos = int(pixel_pos_c[0] + size[0] / 2), - int(pixel_pos_c[1] - size[1] / 2)
if 0 <= pixel_pos[0] < size[0] and 0 <= pixel_pos[1] < size[1]:
return pixel_pos
else:
return None
def getRawValue(self, pixel_pos):
"""
Translate pixel coordinates into raw pixel value
Args:
pixel_pos(tuple int, int): the position in pixel coordinates
Returns: the raw "value" of the position. In case the raw data has more than 2 dimensions, it returns an array.
Raise LookupError if raw data not found
"""
raw = self.raw
if not raw:
raise LookupError("Cannot compute pixel raw value as stream has no data")
return raw[0][..., pixel_pos[1], pixel_pos[0]].tolist()
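    # Illustrative flow combining the two methods above (a sketch, not part of the
    # original code):
    #
    #     px = stream.getPixelCoordinates((x_phys, y_phys))
    #     if px is not None:
    #         value = stream.getRawValue(px)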
def getBoundingBox(self, im=None):
"""
Get the bounding box in X/Y of the complete data contained.
Args:
im: (DataArray(Shadow) or None): the data of the image if provided. If None, the raw data of the stream
is used.
return (tuple of floats (minx, miny, maxx, maxy)): left,top,right,bottom positions of the bounding box where top < bottom and left < right
Raises:
ValueError: If the stream has no (spatial) data and stream's image is not defined
"""
if im is None:
try:
im = self.image.value
except AttributeError:
im = None
if im is None and self.raw:
im = self.raw[0]
if im is None:
raise ValueError("Cannot compute bounding-box as stream has no data and stream's image is not defined")
return img.getBoundingBox(im)
def getRawMetadata(self):
"""
Gets the raw metadata structure from the stream.
A list of metadata dicts is returned.
"""
return [None if data is None else data.metadata for data in self.raw]
| gpl-2.0 |
rgommers/statsmodels | examples/python/ols.py | 30 | 5601 |
## Ordinary Least Squares
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
np.random.seed(9876789)
# ## OLS estimation
#
# Artificial data:
nsample = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x**2))
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsample)
# Our model needs an intercept so we add a column of 1s:
X = sm.add_constant(X)
y = np.dot(X, beta) + e
# Fit and summary:
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())
# Quantities of interest can be extracted directly from the fitted model. Type ``dir(results)`` for a full list. Here are some examples:
print('Parameters: ', results.params)
print('R2: ', results.rsquared)
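# A few more attributes that are commonly inspected (a minimal illustration; these
# names follow the statsmodels RegressionResults API):
print('Standard errors: ', results.bse)
print('Confidence intervals:\n', results.conf_int())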
# ## OLS non-linear curve but linear in parameters
#
# We simulate artificial data with a non-linear relationship between x and y:
nsample = 50
sig = 0.5
x = np.linspace(0, 20, nsample)
X = np.column_stack((x, np.sin(x), (x-5)**2, np.ones(nsample)))
beta = [0.5, 0.5, -0.02, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
# Fit and summary:
res = sm.OLS(y, X).fit()
print(res.summary())
# Extract other quantities of interest:
print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('Predicted values: ', res.predict())
# Draw a plot to compare the true relationship to OLS predictions. Confidence intervals around the predictions are built using the ``wls_prediction_std`` command.
prstd, iv_l, iv_u = wls_prediction_std(res)
fig, ax = plt.subplots()
ax.plot(x, y, 'o', label="data")
ax.plot(x, y_true, 'b-', label="True")
ax.plot(x, res.fittedvalues, 'r--.', label="OLS")
ax.plot(x, iv_u, 'r--')
ax.plot(x, iv_l, 'r--')
ax.legend(loc='best');
# ## OLS with dummy variables
#
# We generate some artificial data. There are 3 groups which will be modelled using dummy variables. Group 0 is the omitted/benchmark category.
nsample = 50
groups = np.zeros(nsample, int)
groups[20:40] = 1
groups[40:] = 2
#dummy = (groups[:,None] == np.unique(groups)).astype(float)
dummy = sm.categorical(groups, drop=True)
x = np.linspace(0, 20, nsample)
# drop reference category
X = np.column_stack((x, dummy[:,1:]))
X = sm.add_constant(X, prepend=False)
beta = [1., 3, -3, 10]
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + e
# Inspect the data:
print(X[:5,:])
print(y[:5])
print(groups)
print(dummy[:5,:])
# Fit and summary:
res2 = sm.OLS(y, X).fit()
print(res2.summary())
# Draw a plot to compare the true relationship to OLS predictions:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x, y, 'o', label="Data")
ax.plot(x, y_true, 'b-', label="True")
ax.plot(x, res2.fittedvalues, 'r--.', label="Predicted")
ax.plot(x, iv_u, 'r--')
ax.plot(x, iv_l, 'r--')
ax.legend(loc="best")
# ## Joint hypothesis test
#
# ### F test
#
# We want to test the hypothesis that both coefficients on the dummy variables are equal to zero, that is, $R \times \beta = 0$. An F test leads us to strongly reject the null hypothesis of identical constant in the 3 groups:
R = [[0, 1, 0, 0], [0, 0, 1, 0]]
print(np.array(R))
print(res2.f_test(R))
# You can also use formula-like syntax to test hypotheses
print(res2.f_test("x2 = x3 = 0"))
# ### Small group effects
#
# If we generate artificial data with smaller group effects, the F test can no longer reject the null hypothesis:
beta = [1., 0.3, -0.0, 10]
y_true = np.dot(X, beta)
y = y_true + np.random.normal(size=nsample)
res3 = sm.OLS(y, X).fit()
print(res3.f_test(R))
print(res3.f_test("x2 = x3 = 0"))
# ### Multicollinearity
#
# The Longley dataset is well known to have high multicollinearity. That is, the exogenous predictors are highly correlated. This is problematic because it can affect the stability of our coefficient estimates as we make minor changes to model specification.
from statsmodels.datasets.longley import load_pandas
y = load_pandas().endog
X = load_pandas().exog
X = sm.add_constant(X)
# Fit and summary:
ols_model = sm.OLS(y, X)
ols_results = ols_model.fit()
print(ols_results.summary())
# #### Condition number
#
# One way to assess multicollinearity is to compute the condition number. Values over 20 are worrisome (see Greene 4.9). The first step is to normalize the independent variables to have unit length:
norm_x = X.values
for i, name in enumerate(X):
if name == "const":
continue
norm_x[:,i] = X[name]/np.linalg.norm(X[name])
norm_xtx = np.dot(norm_x.T,norm_x)
# Then, we take the square root of the ratio of the biggest to the smallest eigen values.
eigs = np.linalg.eigvals(norm_xtx)
condition_number = np.sqrt(eigs.max() / eigs.min())
print(condition_number)
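# Cross-check (illustrative, assuming the normalized design matrix norm_x from above):
# numpy's built-in 2-norm condition number should give essentially the same value.
print(np.linalg.cond(norm_x))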
# #### Dropping an observation
#
# Greene also points out that dropping a single observation can have a dramatic effect on the coefficient estimates:
ols_results2 = sm.OLS(y.ix[:14], X.ix[:14]).fit()
print("Percentage change %4.2f%%\n"*7 % tuple([i for i in (ols_results2.params - ols_results.params)/ols_results.params*100]))
# We can also look at formal statistics for this such as the DFBETAS -- a standardized measure of how much each coefficient changes when that observation is left out.
infl = ols_results.get_influence()
# In general we may consider DBETAS in absolute value greater than $2/\sqrt{N}$ to be influential observations
2./len(X)**.5
print(infl.summary_frame().filter(regex="dfb"))
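# A minimal follow-up sketch (assuming the dfbeta columns returned by summary_frame()):
# list the observations whose DFBETAS exceed the 2/sqrt(N) rule of thumb for any coefficient.
dfbetas = infl.summary_frame().filter(regex="dfb")
threshold = 2. / len(X) ** .5
print(dfbetas[(dfbetas.abs() > threshold).any(axis=1)])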
| bsd-3-clause |
xuewei4d/scikit-learn | examples/linear_model/plot_ard.py | 43 | 3912 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for ARD
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
# #############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
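# Illustrative check (not part of the original example): ARD should drive most of the
# irrelevant coefficients towards zero, unlike OLS which keeps them all non-zero.
print("ARD coefficients above 1e-3: %d (true non-zero weights: %d)"
      % (np.sum(np.abs(clf.coef_) > 1e-3), np.sum(w != 0)))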
# #############################################################################
# Plot the true weights, the estimated weights, the histogram of the
# weights, and predictions with standard deviations
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], np.full(len(relevant_features), 5.),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=1)
clf_poly = ARDRegression(threshold_lambda=1e5)
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial ARD", linewidth=2)
plt.plot(X_plot, y_plot, color='gold', linewidth=2,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
| bsd-3-clause |
MatteusDeloge/opengrid | notebooks/job_electricity_standby.py | 1 | 6293 |
# coding: utf-8
# In[ ]:
# opengrid imports
from opengrid.library import misc, houseprint, caching, analysis
from opengrid import config
c=config.Config()
# other imports
import pandas as pd
import charts
import numpy as np
import os
# configuration for the plots
DEV = c.get('env', 'type') == 'dev' # DEV is True if we are in development environment, False if on the droplet
if not DEV:
# production environment: don't try to display plots
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import HourLocator, DateFormatter, AutoDateLocator, num2date
if DEV:
if c.get('env', 'plots') == 'inline':
get_ipython().magic(u'matplotlib inline')
else:
get_ipython().magic(u'matplotlib qt')
else:
pass # don't try to render plots
plt.rcParams['figure.figsize'] = 12,8
# In[ ]:
hp = houseprint.Houseprint()
sensors = hp.get_sensors(sensortype='electricity') # sensor objects
# Remove some sensors
exclude = [
'565de0a7dc64d8370aa321491217b85f' # 3E
]
solar = [x.key for x in hp.search_sensors(type='electricity', system='solar')]
exclude += solar
for s in sensors:
if s.key in exclude:
sensors.remove(s)
hp.init_tmpo()
# In[ ]:
#hp.sync_tmpos()
# In[ ]:
# The first time, this will take a very looong time to get all the detailed data for building the cache
# Afterwards, this is quick
caching.cache_results(hp=hp, sensors=sensors, function='daily_min', resultname='elec_daily_min')
caching.cache_results(hp=hp, sensors=sensors, function='daily_max', resultname='elec_daily_max')
# In[ ]:
cache_min = caching.Cache(variable='elec_daily_min')
cache_max = caching.Cache(variable='elec_daily_max')
dfdaymin = cache_min.get(sensors=sensors)
dfdaymax = cache_max.get(sensors=sensors)
# The next plot shows that some periods are missing. Due to the cumulative nature of the electricity counter, we still have the total consumption. However, it is spread out over the entire period. So we don't know the standby power during these days, and we have to remove those days.
# In[ ]:
if DEV:
sensor = hp.search_sensors(key='3aa4')[0]
df = sensor.get_data(head=pd.Timestamp('20151117'), tail=pd.Timestamp('20160104'))
charts.plot(df, stock=True, show='inline')
# In[ ]:
# Clean out the data:
# First remove days with too low values to be realistic
dfdaymin[dfdaymin < 10] = np.nan
# Now remove days where the minimum=maximum (within 1 Watt difference)
dfdaymin[(dfdaymax - dfdaymin) < 1] = np.nan
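# Informational only (assumes dfdaymin holds one column of daily minima per sensor):
# report how many sensor-days survive the two cleaning rules above.
print('Valid sensor-days remaining: {}'.format(int(dfdaymin.count().sum())))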
# In[ ]:
if DEV:
charts.plot(dfdaymin, stock=True, show='inline')
# In[ ]:
DEV
# In[ ]:
standby_statistics = dfdaymin.T.describe(percentiles=[0.1,0.5,0.9]).T
# In[ ]:
if DEV:
charts.plot(standby_statistics[['10%', '50%', '90%']], stock=True, show='inline')
# In[ ]:
# Get detailed profiles for the last day
now = pd.Timestamp('now', tz='UTC')
start_of_day = now - pd.Timedelta(hours=now.hour, minutes=now.minute, seconds=now.second)
sensors = map(hp.find_sensor, dfdaymin.columns)
df_details = hp.get_data(sensors = sensors, head=start_of_day)
# ### Boxplot approach. Possible for a period of maximum +/- 2 weeks.
# In[ ]:
# choose a period
look_back_days = 10
start = now - pd.Timedelta(days=look_back_days)
dfdaymin_period = dfdaymin.ix[start:].dropna(axis=1, how='all')
# In[ ]:
box = [dfdaymin_period.loc[i,:].dropna().values for i in dfdaymin_period.index]
for sensor in dfdaymin_period.columns:
plt.figure(figsize=(10,5))
ax1=plt.subplot(121)
ax1.boxplot(box, positions=range(len(box)), notch=False)
ax1.plot(range(len(box)), dfdaymin_period[sensor], 'rD', ms=10, label='Sluipverbruik')
xticks = [x.strftime(format='%d/%m') for x in dfdaymin_period.index]
plt.xticks(range(len(box)), xticks, rotation='vertical')
plt.title(hp.find_sensor(sensor).device.key + ' - ' + sensor)
ax1.grid()
ax1.set_ylabel('Watt')
plt.legend(numpoints=1, frameon=False)
ax2=plt.subplot(122)
try:
ax2.plot_date(df_details[sensor].index, df_details[sensor].values, 'b-', label='Afgelopen nacht')
#ax2.xaxis_date() #Put timeseries plot in local time
# rotate the labels
plt.xticks(rotation='vertical')
ax2.set_ylabel('Watt')
ax2.grid()
plt.legend(loc='upper right', frameon=False)
plt.tight_layout()
except Exception as e:
print(e)
else:
plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_horizontal_'+sensor+'.png'), dpi=100)
pass
if not DEV:
plt.close()
# ### Percentile approach. Useful for longer time periods, but tweaking of graph still needed
# In[ ]:
# choose a period
look_back_days = 40
start = now - pd.Timedelta(days=look_back_days)
dfdaymin_period = dfdaymin.ix[start:].dropna(axis=1, how='all')
df = dfdaymin_period.join(standby_statistics[['10%', '50%', '90%']], how='left')
# In[ ]:
for sensor in dfdaymin_period.columns:
plt.figure(figsize=(10,8))
ax1=plt.subplot(211)
ax1.plot_date(df.index, df[u'10%'], '-', lw=2, color='g', label=u'10% percentile')
ax1.plot_date(df.index, df[u'50%'], '-', lw=2, color='orange', label=u'50% percentile')
ax1.plot_date(df.index, df[u'90%'], '-', lw=2, color='r', label=u'90% percentile')
ax1.plot_date(df.index, df[sensor], 'rD', ms=7, label='Your standby power')
ax1.legend()
locs, lables=plt.xticks()
xticks = [x.strftime(format='%d/%m') for x in num2date(locs)]
plt.xticks(locs, xticks, rotation='vertical')
plt.title(hp.find_sensor(sensor).device.key + ' - ' + sensor)
ax1.grid()
ax1.set_ylabel('Watt')
ax2=plt.subplot(212)
try:
ax2.plot_date(df_details[sensor].index, df_details[sensor].values, 'b-', label='Afgelopen nacht')
#ax2.xaxis_date() #Put timeseries plot in local time
# rotate the labels
plt.xticks(rotation='vertical')
ax2.set_ylabel('Watt')
ax2.grid()
plt.legend(loc='upper right', frameon=False)
plt.tight_layout()
except Exception as e:
print(e)
else:
plt.savefig(os.path.join(c.get('data', 'folder'), 'figures', 'standby_vertical_'+sensor+'.png'), dpi=100)
pass
if not DEV:
plt.close()
# In[ ]:
| apache-2.0 |
mehdidc/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
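# Note on the reference implementation above: MyPerceptron applies the classic
# perceptron update, w <- w + y_i * x_i and b <- b + y_i, only to samples that are
# currently misclassified; the sklearn estimator is checked against this behaviour below.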
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
meduz/NeuroTools | examples/matlab_vs_python/smallnet.py | 3 | 3997 | # Created by Eugene M. Izhikevich, 2003 Modified by S. Fusi 2007
# Ported to Python by Eilif Muller, 2008.
#
# Notes:
#
# Requires matplotlib,ipython,numpy>=1.0.3
# On a debian/ubuntu based system:
# $ apt-get install python-matplotlib python-numpy ipython
#
# Start ipython with threaded plotting support:
# $ ipython -pylab
#
# At the resulting prompt, run the file by:
# In [1]: execfile('smallnet.py')
# <output>
# In [2]: myplot()
# Modules required
import numpy
import numpy.random as random
from itertools import repeat
# Bug fix for numpy version 1.0.4
numpy.lib.function_base.any = numpy.any
# For measuring performance
import time
t1 = time.time()
# Excitatory and inhibitory neuron counts
Ne = 1000
Ni = 4
N = Ne+Ni
# Synaptic couplings
Je = 250.0/Ne
Ji = 0.0
# reset depolarization (mV)
reset = 0.0
# refractory period (ms)
refr = 2.5
# Synaptic couplings (mV)
S = numpy.zeros((N,N))
S[:,:Ne] = Je*random.uniform(size=(N,Ne))
S[:,Ne:] = -Ji*random.uniform(size=(N,Ni))
# Connectivity
S[:,:Ne][random.uniform(size=(N,Ne))-0.9<=0.0]=0.0
S[:,Ne:][random.uniform(size=(N,Ni))-0.9<=0.0]=0.0
# (mV/ms) (lambda is a python keyword)
leak = 5.0
dt = 0.05
sdt = numpy.sqrt(dt)
# Statistics of the background external current
mb = 3.0; sb = 4.0
mue = mb; sigmae=sb
sigmai = 0.0
# State variable v, initial value of 0
v = numpy.zeros(N)
# Refractory period state variable
r = numpy.zeros(N)
# Spike timings in a list
firings = []
print 'mu(nu=5Hz)=%f' % (mb+Ne*Je*.015-leak,)
print 'mu(nu=100Hz)=%f' % (mb+Ne*Je*.1-leak,)
# total duration of the simulation (ms)
duration = 400.0
t = numpy.arange(0.0,400.0,dt)
vt = numpy.zeros_like(t)
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
t1 = time.time()
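# The loop below is an Euler-Maruyama step of the leaky integrate-and-fire dynamics:
# v <- v + dt*(mu - leak) + sqrt(dt)*Iext + (summed synaptic input from spiking cells),
# where Iext is already scaled by sigma; spikes are detected at a 20 mV threshold,
# the voltage is reset to 0 mV and a 2.5 ms refractory period is applied.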
for i,ti in enumerate(t):
# time for a strong external input
if ti>150.0:
mue = 6.5
sigmae = 7.5
# time to restore the initial statistics of the external current
if ti>300.0:
mue = mb
sigmae = sb
Iext = random.normal(size=N)
Iext[:Ne]*=sigmae
Iext[Ne:]*=sigmai
# Which neurons fired?
fired = numpy.nonzero(v>=20.0)[0]
if len(fired)>0:
# Save mean firing rate of the excitatory neurons
v[fired] = reset
r[fired] = refr
# Append spikes to the spike list
firings+=zip(fired,repeat(ti))
aux = v-dt*(leak-mue)+numpy.sum(S[:,fired],1)+sdt*Iext
else:
aux = v-dt*(leak-mue)+sdt*Iext;
# Neurons not in the refractory period
nr = numpy.nonzero(r<=0)[0]
# Bound voltages above 0.0
v[nr] = numpy.where(aux[nr]>=0.0,aux[nr],0.0)
# Progress refractory variable
nr = numpy.nonzero(r>0)[0]
r[nr]-=dt
# record the voltage trace of the zeroeth neuron
vt[i] = v[0]
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
# -------------------------------------------------------------------------
# Plot everything
# -------------------------------------------------------------------------
def myplot():
global firings
t1 = time.time()
figure()
# Membrane potential trace of the zeroeth neuron
subplot(3,1,1)
vt[vt>=20.0]=65.0
plot(t,vt)
ylabel(r'$V-V_{rest}\ \left[\rm{mV}\right]$')
# Raster plot of the spikes of the network
subplot(3,1,2)
myfirings = array(firings)
myfirings_100 = myfirings[myfirings[:,0]<min(100,Ne)]
plot(myfirings_100[:,1],myfirings_100[:,0],'.')
axis([0, duration, 0, min(100,Ne)])
ylabel('Neuron index')
# Mean firing rate of the excitatory population as a function of time
subplot(3,1,3)
# 1 ms resultion of rate histogram
dx = 1.0
x = arange(0,duration,dx)
myfirings_Ne = myfirings[myfirings[:,0]<Ne]
mean_fe,x = numpy.histogram(myfirings_Ne[:,1],x)
plot(x,mean_fe/dx/Ne*1000.0,ls='steps')
ylabel('Hz')
xlabel('time [ms]')
t2 = time.time()
print 'Finished. Elapsed', str(t2-t1), ' seconds.'
#myplot()
| gpl-2.0 |
gautam1168/tardis | tardis/atomic.py | 5 | 24426 | # atomic model
#TODO revisit import statements and reorganize
from scipy import interpolate
import numpy as np
import logging
import os
import h5py
import cPickle as pickle
from astropy import table, units
from collections import OrderedDict
from pandas import DataFrame
import pandas as pd
logger = logging.getLogger(__name__)
default_atom_h5_path = os.path.join(os.path.dirname(__file__), 'data', 'atom_data.h5')
def data_path(fname):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, fname)
atomic_symbols_data = np.recfromtxt(data_path('atomic_symbols.dat'),
names=['atomic_number', 'symbol'])
symbol2atomic_number = OrderedDict(zip(atomic_symbols_data['symbol'], atomic_symbols_data['atomic_number']))
atomic_number2symbol = OrderedDict(atomic_symbols_data)
@PendingDeprecationWarning
def read_atomic_data(fname=None):
return read_basic_atom_data(fname)
def read_hdf5_data(fname, dset_name):
"""This function reads the dataset (dset_name) from the hdf5 file (fname).
In addition it uses the attribute 'units' and parses it to the `~astropy.table.Table` constructor.
Parameters
----------
fname : `str`, optional
path to atomic.h5 file, if set to None it will read in default data directory
Returns
-------
data : `~astropy.table.Table`
returns the respective
"""
h5_file = h5py.File(fname, 'r')
dataset = h5_file[dset_name]
data = np.asarray(dataset)
# data_units = dataset.attrs['units']
data_table = table.Table(data)
# for i, col_unit in enumerate(data_units):
# if col_unit == 'n':
# data_table.columns[i].units = None
# elif col_unit == '1':
# data_table.columns[i].units = units.Unit(1)
# else:
# data_table.columns[i].units = units.Unit(col_unit)
h5_file.close()
return data_table
def read_basic_atom_data(fname=None):
"""This function reads the atomic number, symbol, and mass from hdf5 file
Parameters
----------
fname : `str`, optional
path to atomic.h5 file, if set to None it will read in default data directory
Returns
-------
data : `~astropy.table.Table`
table with fields z[1], symbol, mass[u]
"""
data_table = read_hdf5_data(fname, 'basic_atom_data')
# data_table.columns['mass'] = units.Unit('u').to('g', data_table['mass'])
return data_table
def read_ionization_data(fname=None):
"""This function reads the atomic number, ion number, and ionization energy from hdf5 file
Parameters
----------
fname : `str`, optional
path to atomic.h5 file, if set to None it will read in default data directory
Returns
-------
data : `~astropy.table.Table`
table with fields z[1], ion[1], ionization_energy[eV]
.. note:: energy from unionized atoms to once-ionized atoms ion = 1, for once ionized
to twice ionized ion=2, etc.
"""
data_table = read_hdf5_data(fname, 'ionization_data')
#data_table.columns['ionization_energy'] = units.Unit('eV').to('erg', data_table.columns['ionization_energy'])
return data_table
def read_levels_data(fname=None):
"""This function reads atomic number, ion number, level_number, energy, g, metastable
information from hdf5 file.
Parameters
----------
fname : `str`, optional
path to atomic.h5 file, if set to None it will read in default data directory
Returns
-------
data : `~astropy.table.Table`
table with fields z[1], ion[1], level_number, energy, g, metastable
"""
data_table = read_hdf5_data(fname, 'levels_data')
#data_table.columns['energy'].convert_units_to('erg')
#data_table.columns['energy'] = units.Unit('eV').to('erg', data_table.columns['energy'])
return data_table
def read_synpp_refs(fname):
data_table = h5py.File(fname, 'r')['synpp_refs']
return data_table.__array__()
def read_lines_data(fname=None):
"""
This function reads the wavelength, atomic number, ion number, f_ul, f_l and level id information
from hdf5 file
Parameters
----------
fname : `str`, optional
path to atomic.h5 file, if set to None it will read in default data directory
Returns
-------
data : `~astropy.table.Table`
table with fields wavelength, atomic_number, ion_number, f_ul, f_lu, level_id_lower, level_id_upper.
"""
data_table = read_hdf5_data(fname, 'lines_data')
#data_table.columns['ionization_energy'].convert_units_to('erg')
return data_table
def read_zeta_data(fname):
"""
This function reads the recombination coefficient data from the HDF5 file
:return:
"""
if fname is None:
raise ValueError('fname can not be "None" when trying to use NebularAtom')
if not os.path.exists(fname):
raise IOError('HDF5 File doesn\'t exist')
h5_file = h5py.File(fname, 'r')
if 'zeta_data' not in h5_file.keys():
raise ValueError('zeta_data not available in this HDF5-data file. It can not be used with NebularAtomData')
zeta_data = h5_file['zeta_data']
t_rads = zeta_data.attrs['t_rad']
return pd.DataFrame(zeta_data[:,2:], index=pd.MultiIndex.from_arrays(zeta_data[:,:2].transpose().astype(int)),
columns=t_rads)
def read_collision_data(fname):
if fname is None:
raise ValueError('fname can not be "None" when trying to use NebularAtom')
if not os.path.exists(fname):
raise IOError('HDF5 File doesn\'t exist')
h5_file = h5py.File(fname, 'r')
if 'collision_data' not in h5_file.keys():
raise ValueError('collision_data not available in this HDF5-data file. It can not be used with NLTE')
collision_data = np.array(h5_file['collision_data'])
collision_temperatures = h5_file['collision_data'].attrs['temperatures']
return collision_data, collision_temperatures
def read_ion_cx_data(fname):
try:
h5_file = h5py.File(fname, 'r')
ion_cx_th_data = h5_file['ionization_cx_threshold']
ion_cx_sp_data = h5_file['ionization_cx_support']
return ion_cx_th_data, ion_cx_sp_data
except IOError, err:
print(err.errno)
print(err)
logger.critical('Cannot import. Error opening the file to read ionization_cx')
def read_macro_atom_data(fname):
if fname is None:
raise ValueError('fname can not be "None" when trying to use NebularAtom')
if not os.path.exists(fname):
raise IOError('HDF5 File doesn\'t exist')
h5_file = h5py.File(fname, 'r')
if 'macro_atom_data' not in h5_file.keys():
raise ValueError('Macro Atom Data (macro_atom_data) is not in this HDF5-data file. '
'It is needed for complex line interaction')
macro_atom_data = h5_file['macro_atom_data']
macro_atom_counts = h5_file['macro_atom_references']
return macro_atom_data, macro_atom_counts
class AtomData(object):
"""
Class for storing atomic data
AtomData
---------
Parameters
----------
basic_atom_data : `~astropy.table.Table`
containing the basic atom data: z, symbol, and mass
ionization_data : ~astropy.table.Table
containing the ionization data: z, ion, and ionization energy
::important to note here is that ion describes the final ion state
e.g. H I - H II is described with ion=2
levels : ~astropy.table.Table
containing the levels data: z, ion, level_number, energy, g
lines : ~astropy.table.Table
containing the lines data: wavelength, z, ion, levels_number_lower,
levels_number_upper, f_lu, f_ul
macro_atom_data : tuple of ~astropy.table.Table
default ~None, a tuple of the macro-atom data and macro-atom references
zeta_data : ~dict of interpolation objects
default ~None
"""
@classmethod
def from_hdf5(cls, fname=None):
"""
Function to read all the atom data from a special TARDIS HDF5 File.
Parameters
----------
fname: str, optional
the default for this is `None` and then it will use the very limited atomic_data shipped with TARDIS
For more complex atomic data please contact the authors.
use_macro_atom:
default `False`. Set to `True`, if you want to read in macro_atom_data
"""
if fname is None:
fname = default_atom_h5_path
if not os.path.exists(fname):
raise ValueError("Supplied Atomic Model Database %s does not exists" % fname)
atom_data = read_basic_atom_data(fname)
ionization_data = read_ionization_data(fname)
levels_data = read_levels_data(fname)
lines_data = read_lines_data(fname)
with h5py.File(fname, 'r') as h5_file:
h5_datasets = h5_file.keys()
if 'macro_atom_data' in h5_datasets:
macro_atom_data = read_macro_atom_data(fname)
else:
macro_atom_data = None
if 'zeta_data' in h5_datasets:
zeta_data = read_zeta_data(fname)
else:
zeta_data = None
if 'collision_data' in h5_datasets:
collision_data, collision_data_temperatures = read_collision_data(fname)
else:
collision_data, collision_data_temperatures = (None, None)
if 'synpp_refs' in h5_datasets:
synpp_refs = read_synpp_refs(fname)
else:
synpp_refs = None
if 'ion_cx_data' in h5_datasets and 'ion_cx_data' in h5_datasets:
ion_cx_data = read_ion_cx_data(fname)
else:
ion_cx_data = None
atom_data = cls(atom_data=atom_data, ionization_data=ionization_data, levels_data=levels_data,
lines_data=lines_data, macro_atom_data=macro_atom_data, zeta_data=zeta_data,
collision_data=(collision_data, collision_data_temperatures), synpp_refs=synpp_refs,
ion_cx_data=ion_cx_data)
with h5py.File(fname, 'r') as h5_file:
atom_data.uuid1 = h5_file.attrs['uuid1']
atom_data.md5 = h5_file.attrs['md5']
atom_data.version = h5_file.attrs.get('database_version', None)
if atom_data.version is not None:
atom_data.data_sources = pickle.loads(h5_file.attrs['data_sources'])
logger.info('Read Atom Data with UUID=%s and MD5=%s', atom_data.uuid1, atom_data.md5)
return atom_data
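    # Illustrative usage sketch (not from the original file; names follow the public
    # methods of this class):
    #
    #     atom_data = AtomData.from_hdf5()  # falls back to the bundled atom_data.h5
    #     atom_data.prepare_atom_data(set([14, 26]), line_interaction_type='scatter')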
def __init__(self, atom_data, ionization_data, levels_data, lines_data, macro_atom_data=None, zeta_data=None,
collision_data=None, synpp_refs=None, ion_cx_data=None):
if macro_atom_data is not None:
self.has_macro_atom = True
self.macro_atom_data_all = DataFrame(macro_atom_data[0].__array__())
self.macro_atom_references_all = DataFrame(macro_atom_data[1].__array__())
else:
self.has_macro_atom = False
if ion_cx_data is not None:
self.has_ion_cx_data = True
#TODO:Farm a panda here
self.ion_cx_th_data = DataFrame(np.array(ion_cx_data[0]))
self.ion_cx_th_data.set_index(['atomic_number', 'ion_number', 'level_id'], inplace=True)
self.ion_cx_sp_data = DataFrame(np.array(ion_cx_data[1]))
self.ion_cx_sp_data.set_index(['atomic_number', 'ion_number', 'level_id'])
else:
self.has_ion_cx_data = False
if zeta_data is not None:
self.zeta_data = zeta_data
self.has_zeta_data = True
else:
self.has_zeta_data = False
if collision_data[0] is not None:
self.collision_data = DataFrame(collision_data[0])
self.collision_data_temperatures = collision_data[1]
self.collision_data.set_index(['atomic_number', 'ion_number', 'level_number_lower', 'level_number_upper'],
inplace=True)
self.has_collision_data = True
else:
self.has_collision_data = False
if synpp_refs is not None:
self.has_synpp_refs = True
self.synpp_refs = pd.DataFrame(synpp_refs)
self.synpp_refs.set_index(['atomic_number', 'ion_number'], inplace=True)
else:
self.has_synpp_refs = False
self.atom_data = DataFrame(atom_data.__array__())
self.atom_data.set_index('atomic_number', inplace=True)
self.atom_data.mass = units.Unit('u').to('g', self.atom_data.mass.values)
self.ionization_data = DataFrame(ionization_data.__array__())
self.ionization_data.set_index(['atomic_number', 'ion_number'], inplace=True)
self.ionization_data.ionization_energy = units.Unit('eV').to('erg',
self.ionization_data.ionization_energy.values)
self._levels = DataFrame(levels_data.__array__())
self._levels.energy = units.Unit('eV').to('erg', self._levels.energy.values)
self._lines = DataFrame(lines_data.__array__())
self._lines.set_index('line_id', inplace=True)
self._lines['nu'] = units.Unit('angstrom').to('Hz', self._lines['wavelength'], units.spectral())
self._lines['wavelength_cm'] = units.Unit('angstrom').to('cm', self._lines['wavelength'])
#tmp_lines_index = pd.MultiIndex.from_arrays(self.lines)
#self.lines_inde
self.symbol2atomic_number = OrderedDict(zip(self.atom_data['symbol'].values, self.atom_data.index))
self.atomic_number2symbol = OrderedDict(zip(self.atom_data.index, self.atom_data['symbol']))
def prepare_atom_data(self, selected_atomic_numbers, line_interaction_type='scatter', max_ion_number=None,
nlte_species=[]):
"""
Prepares the atom data to set the lines, levels and if requested macro atom data.
This function mainly cuts the `levels` and `lines` by discarding any data that is not needed (any data
for atoms that are not needed
Parameters
----------
selected_atoms : `~set`
set of selected atom numbers, e.g. set([14, 26])
line_interaction_type : `~str`
can be 'scatter', 'downbranch' or 'macroatom'
max_ion_number : `~int`
maximum ion number to be included in the calculation
"""
self.selected_atomic_numbers = selected_atomic_numbers
self.nlte_species = nlte_species
self._levels = self._levels.reset_index()
self.levels = self._levels.copy()
self.levels = self.levels[self.levels['atomic_number'].isin(self.selected_atomic_numbers)]
if max_ion_number is not None:
self.levels = self.levels[self.levels['ion_number'] <= max_ion_number]
self.levels = self.levels.set_index(['atomic_number', 'ion_number', 'level_number'])
self.levels_index = pd.Series(np.arange(len(self.levels), dtype=int), index=self.levels.index)
#cutting levels_lines
self.lines = self._lines.copy()
self.lines = self.lines[self.lines['atomic_number'].isin(self.selected_atomic_numbers)]
if max_ion_number is not None:
self.lines = self.lines[self.lines['ion_number'] <= max_ion_number]
self.lines.sort('wavelength', inplace=True)
self.lines_index = pd.Series(np.arange(len(self.lines), dtype=int), index=self.lines.index)
tmp_lines_lower2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
self.lines['level_number_lower']])
self.lines_lower2level_idx = self.levels_index.ix[tmp_lines_lower2level_idx].values.astype(np.int64)
tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays([self.lines['atomic_number'], self.lines['ion_number'],
self.lines['level_number_upper']])
self.lines_upper2level_idx = self.levels_index.ix[tmp_lines_upper2level_idx].values.astype(np.int64)
self.atom_ion_index = None
self.levels_index2atom_ion_index = None
if self.has_macro_atom and not (line_interaction_type == 'scatter'):
self.macro_atom_data = self.macro_atom_data_all[
self.macro_atom_data_all['atomic_number'].isin(self.selected_atomic_numbers)]
if max_ion_number is not None:
self.macro_atom_data = self.macro_atom_data[self.macro_atom_data['ion_number'] <= max_ion_number]
self.macro_atom_references = self.macro_atom_references_all[
self.macro_atom_references_all['atomic_number'].isin(
self.selected_atomic_numbers)]
if max_ion_number is not None:
self.macro_atom_references = self.macro_atom_references[
self.macro_atom_references['ion_number'] <= max_ion_number]
if line_interaction_type == 'downbranch':
self.macro_atom_data = self.macro_atom_data[(self.macro_atom_data['transition_type'] == -1).values]
self.macro_atom_references = self.macro_atom_references[self.macro_atom_references['count_down'] > 0]
self.macro_atom_references['count_total'] = self.macro_atom_references['count_down']
self.macro_atom_references['block_references'] = np.hstack((0,
np.cumsum(self.macro_atom_references[
'count_down'].values[:-1])))
elif line_interaction_type == 'macroatom':
self.macro_atom_references['block_references'] = np.hstack((0,
np.cumsum(self.macro_atom_references[
'count_total'].values[:-1])))
self.macro_atom_references.set_index(['atomic_number', 'ion_number', 'source_level_number'], inplace=True)
self.macro_atom_references['references_idx'] = np.arange(len(self.macro_atom_references))
self.macro_atom_data['lines_idx'] = self.lines_index.ix[self.macro_atom_data['transition_line_id']].values
tmp_lines_upper2level_idx = pd.MultiIndex.from_arrays(
[self.lines['atomic_number'], self.lines['ion_number'],
self.lines['level_number_upper']])
self.lines_upper2macro_reference_idx = self.macro_atom_references['references_idx'].ix[
tmp_lines_upper2level_idx].values.astype(np.int64)
tmp_macro_destination_level_idx = pd.MultiIndex.from_arrays([self.macro_atom_data['atomic_number'],
self.macro_atom_data['ion_number'],
self.macro_atom_data[
'destination_level_number']])
if line_interaction_type == 'macroatom':
self.macro_atom_data['destination_level_idx'] = self.macro_atom_references['references_idx'].ix[
tmp_macro_destination_level_idx].values.astype(np.int64)
elif line_interaction_type == 'downbranch':
self.macro_atom_data['destination_level_idx'] = (np.ones(len(self.macro_atom_data)) * -1).astype(
np.int64)
self.nlte_data = NLTEData(self, nlte_species)
def __repr__(self):
return "<Atomic Data UUID=%s MD5=%s Lines=%d Levels=%d>" % \
(self.uuid1, self.md5, self.lines.atomic_number.count(), self.levels.energy.count())
class NLTEData(object):
def __init__(self, atom_data, nlte_species):
self.atom_data = atom_data
self.lines = atom_data.lines.reset_index()
self.nlte_species = nlte_species
if nlte_species:
logger.info('Preparing the NLTE data')
self._init_indices()
self._create_nlte_mask()
if atom_data.has_collision_data:
self._create_collision_coefficient_matrix()
else:
self._create_nlte_mask()
def _init_indices(self):
self.lines_idx = {}
self.lines_level_number_lower = {}
self.lines_level_number_upper = {}
self.A_uls = {}
self.B_uls = {}
self.B_lus = {}
for species in self.nlte_species:
lines_idx = np.where((self.lines.atomic_number == species[0]) &
(self.lines.ion_number == species[1]))
self.lines_idx[species] = lines_idx
self.lines_level_number_lower[species] = self.lines.level_number_lower.values[lines_idx].astype(int)
self.lines_level_number_upper[species] = self.lines.level_number_upper.values[lines_idx].astype(int)
self.A_uls[species] = self.atom_data.lines.A_ul.values[lines_idx]
self.B_uls[species] = self.atom_data.lines.B_ul.values[lines_idx]
self.B_lus[species] = self.atom_data.lines.B_lu.values[lines_idx]
def _create_nlte_mask(self):
self.nlte_levels_mask = np.zeros(self.atom_data.levels.energy.count()).astype(bool)
self.nlte_lines_mask = np.zeros(self.atom_data.lines.wavelength.count()).astype(bool)
for species in self.nlte_species:
current_levels_mask = (self.atom_data.levels.index.get_level_values(0) == species[0]) & \
(self.atom_data.levels.index.get_level_values(1) == species[1])
current_lines_mask = (self.atom_data.lines.atomic_number.values == species[0]) & \
(self.atom_data.lines.ion_number.values == species[1])
self.nlte_levels_mask |= current_levels_mask
self.nlte_lines_mask |= current_lines_mask
def _create_collision_coefficient_matrix(self):
self.C_ul_interpolator = {}
self.delta_E_matrices = {}
self.g_ratio_matrices = {}
collision_group = self.atom_data.collision_data.groupby(level=['atomic_number', 'ion_number'])
for species in self.nlte_species:
no_of_levels = self.atom_data.levels.ix[species].energy.count()
C_ul_matrix = np.zeros((no_of_levels, no_of_levels, len(self.atom_data.collision_data_temperatures)))
delta_E_matrix = np.zeros((no_of_levels, no_of_levels))
g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))
for (atomic_number, ion_number, level_number_lower, level_number_upper), line in \
collision_group.get_group(species).iterrows():
C_ul_matrix[level_number_lower, level_number_upper, :] = line.values[2:]
delta_E_matrix[level_number_lower, level_number_upper] = line['delta_e']
#TODO TARDISATOMIC fix change the g_ratio to be the otherway round - I flip them now here.
g_ratio_matrix[level_number_lower, level_number_upper] = line['g_ratio']
self.C_ul_interpolator[species] = interpolate.interp1d(self.atom_data.collision_data_temperatures,
C_ul_matrix)
self.delta_E_matrices[species] = delta_E_matrix
self.g_ratio_matrices[species] = g_ratio_matrix
def get_collision_matrix(self, species, t_electrons):
c_ul_matrix = self.C_ul_interpolator[species](t_electrons)
no_of_levels = c_ul_matrix.shape[0]
c_ul_matrix[np.isnan(c_ul_matrix)] = 0.0
#TODO in tardisatomic the g_ratio is the other way round - here I'll flip it in prepare_collision matrix
c_lu_matrix = c_ul_matrix * np.exp(-self.delta_E_matrices[species].reshape((no_of_levels, no_of_levels, 1)) /
t_electrons.reshape((1, 1, t_electrons.shape[0]))) * \
self.g_ratio_matrices[species].reshape((no_of_levels, no_of_levels, 1))
return c_ul_matrix + c_lu_matrix.transpose(1, 0, 2)
| bsd-3-clause |
imaculate/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matlotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
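    Examples
    --------
    A minimal usage sketch (illustrative; assumes the usual top-level import
    path, and exact label assignments depend on the k-means initialization):
    >>> from sklearn.cluster import SpectralCoclustering
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> model = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
    >>> model.row_labels_.shape
    (6,)
    >>> model.column_labels_.shape
    (2,)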
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
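    Examples
    --------
    A minimal usage sketch (illustrative; assumes the usual top-level import
    path, and only output shapes are shown since the labels depend on the
    random state):
    >>> from sklearn.cluster import SpectralBiclustering
    >>> import numpy as np
    >>> X = np.random.RandomState(0).uniform(size=(30, 20))
    >>> model = SpectralBiclustering(n_clusters=(3, 2), random_state=0).fit(X)
    >>> model.row_labels_.shape
    (30,)
    >>> model.column_labels_.shape
    (20,)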
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
carlthome/librosa | librosa/feature/inverse.py | 1 | 9012 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Feature inversion'''
import warnings
import numpy as np
import scipy.fftpack
from ..util.exceptions import ParameterError
from ..core.spectrum import griffinlim
from ..core.spectrum import db_to_power
from ..util.utils import tiny
from .. import filters
from ..util import nnls
__all__ = ['mel_to_stft', 'mel_to_audio',
'mfcc_to_mel', 'mfcc_to_audio']
def mel_to_stft(M, sr=22050, n_fft=2048, power=2.0, **kwargs):
'''Approximate STFT magnitude from a Mel power spectrogram.
Parameters
----------
M : np.ndarray [shape=(n_mels, n), non-negative]
The spectrogram as produced by `feature.melspectrogram`
sr : number > 0 [scalar]
sampling rate of the underlying signal
n_fft : int > 0 [scalar]
number of FFT components in the resulting STFT
power : float > 0 [scalar]
Exponent for the magnitude melspectrogram
kwargs : additional keyword arguments
Mel filter bank parameters.
See `librosa.filters.mel` for details
Returns
-------
S : np.ndarray [shape=(n_fft, t), non-negative]
An approximate linear magnitude spectrogram
See Also
--------
feature.melspectrogram
core.stft
filters.mel
util.nnls
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=5, offset=10)
>>> S = np.abs(librosa.stft(y))
>>> mel_spec = librosa.feature.melspectrogram(S=S, sr=sr)
>>> S_inv = librosa.feature.inverse.mel_to_stft(mel_spec, sr=sr)
Compare the results visually
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max, top_db=None),
... y_axis='log', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original STFT')
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(S_inv - S),
... ref=S.max(), top_db=None),
... vmax=0, y_axis='log', x_axis='time', cmap='magma')
>>> plt.title('Residual error (dB)')
>>> plt.colorbar()
>>> plt.tight_layout()
>>> plt.show()
'''
# Construct a mel basis with dtype matching the input data
mel_basis = filters.mel(sr, n_fft, n_mels=M.shape[0],
dtype=M.dtype,
**kwargs)
# Find the non-negative least squares solution, and apply
# the inverse exponent.
# We'll do the exponentiation in-place.
inverse = nnls(mel_basis, M)
return np.power(inverse, 1. / power, out=inverse)
def mel_to_audio(M, sr=22050, n_fft=2048, hop_length=512, win_length=None,
window='hann', center=True, pad_mode='reflect', power=2.0, n_iter=32,
length=None, dtype=np.float32, **kwargs):
"""Invert a mel power spectrogram to audio using Griffin-Lim.
This is primarily a convenience wrapper for:
>>> S = librosa.feature.inverse.mel_to_stft(M)
>>> y = librosa.griffinlim(S)
Parameters
----------
M : np.ndarray [shape=(n_mels, n), non-negative]
The spectrogram as produced by `feature.melspectrogram`
sr : number > 0 [scalar]
sampling rate of the underlying signal
n_fft : int > 0 [scalar]
number of FFT components in the resulting STFT
hop_length : None or int > 0
The hop length of the STFT. If not provided, it will default to `n_fft // 4`
win_length : None or int > 0
The window length of the STFT. By default, it will equal `n_fft`
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
A window specification as supported by `stft` or `istft`
center : boolean
If `True`, the STFT is assumed to use centered frames.
If `False`, the STFT is assumed to use left-aligned frames.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
power : float > 0 [scalar]
Exponent for the magnitude melspectrogram
n_iter : int > 0
The number of iterations for Griffin-Lim
length : None or int > 0
If provided, the output `y` is zero-padded or clipped to exactly `length`
samples.
dtype : np.dtype
Real numeric type for the time-domain signal. Default is 32-bit float.
kwargs : additional keyword arguments
Mel filter bank parameters
Returns
-------
y : np.ndarray [shape(n,)]
time-domain signal reconstructed from `M`
See Also
--------
core.griffinlim
feature.melspectrogram
filters.mel
feature.inverse.mel_to_stft
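    Examples
    --------
    A minimal round-trip sketch (illustrative; the reconstruction is only
    approximate and Griffin-Lim makes it relatively slow):
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=5, offset=10)
    >>> M = librosa.feature.melspectrogram(y=y, sr=sr)
    >>> y_inv = librosa.feature.inverse.mel_to_audio(M, sr=sr)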
"""
stft = mel_to_stft(M, sr=sr, n_fft=n_fft, power=power, **kwargs)
return griffinlim(stft, n_iter=n_iter, hop_length=hop_length, win_length=win_length,
window=window, center=center, dtype=dtype, length=length,
pad_mode=pad_mode)
def mfcc_to_mel(mfcc, n_mels=128, dct_type=2, norm='ortho', ref=1.0, lifter=0):
'''Invert Mel-frequency cepstral coefficients to approximate a Mel power
spectrogram.
This inversion proceeds in two steps:
1. The inverse DCT is applied to the MFCCs
2. `core.db_to_power` is applied to map the dB-scaled result to a power spectrogram
Parameters
----------
mfcc : np.ndarray [shape=(n_mfcc, n)]
The Mel-frequency cepstral coefficients
n_mels : int > 0
The number of Mel frequencies
dct_type : {1, 2, 3}
Discrete cosine transform (DCT) type
By default, DCT type-2 is used.
norm : None or 'ortho'
If `dct_type` is `2 or 3`, setting `norm='ortho'` uses an orthonormal
DCT basis.
Normalization is not supported for `dct_type=1`.
ref : number or callable
Reference power for (inverse) decibel calculation
lifter : number >= 0
If `lifter>0`, apply inverse liftering (inverse cepstral filtering):
        `M[n, :] <- M[n, :] / (1 + (lifter / 2) * sin(pi * (n + 1) / lifter))`
Returns
-------
M : np.ndarray [shape=(n_mels, n)]
An approximate Mel power spectrum recovered from `mfcc`
Warns
--------
UserWarning
        due to critical values in the lifter array that may invoke underflow.
See Also
--------
mfcc
melspectrogram
scipy.fftpack.dct
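    Examples
    --------
    A minimal sketch (illustrative; uses the bundled example clip):
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=5, offset=10)
    >>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
    >>> M = librosa.feature.inverse.mfcc_to_mel(mfcc)
    >>> M.shape[0]
    128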
'''
if lifter > 0:
n_mfcc = mfcc.shape[0]
idx = np.arange(1, 1 + n_mfcc, dtype=mfcc.dtype)
lifter_sine = 1 + lifter * 0.5 * np.sin(np.pi * idx / lifter)[:, np.newaxis]
# raise a UserWarning if lifter array includes critical values
if np.any(np.abs(lifter_sine) < np.finfo(lifter_sine.dtype).eps):
            warnings.warn(message="lifter array includes critical values that may invoke underflow.",
category=UserWarning)
# lifter mfcc values
mfcc = mfcc / (lifter_sine + tiny(mfcc))
elif lifter != 0:
raise ParameterError('MFCC to mel lifter must be a non-negative number.')
logmel = scipy.fftpack.idct(mfcc, axis=0, type=dct_type, norm=norm, n=n_mels)
return db_to_power(logmel, ref=ref)
def mfcc_to_audio(mfcc, n_mels=128, dct_type=2, norm='ortho', ref=1.0, lifter=0, **kwargs):
'''Convert Mel-frequency cepstral coefficients to a time-domain audio signal
This function is primarily a convenience wrapper for the following steps:
1. Convert mfcc to Mel power spectrum (`mfcc_to_mel`)
2. Convert Mel power spectrum to time-domain audio (`mel_to_audio`)
Parameters
----------
mfcc : np.ndarray [shape=(n_mfcc, n)]
The Mel-frequency cepstral coefficients
n_mels : int > 0
The number of Mel frequencies
dct_type : {1, 2, 3}
Discrete cosine transform (DCT) type
By default, DCT type-2 is used.
norm : None or 'ortho'
If `dct_type` is `2 or 3`, setting `norm='ortho'` uses an orthonormal
DCT basis.
Normalization is not supported for `dct_type=1`.
ref : number or callable
Reference power for (inverse) decibel calculation
lifter : number >= 0
If `lifter>0`, apply inverse liftering (inverse cepstral filtering):
        `M[n, :] <- M[n, :] / (1 + (lifter / 2) * sin(pi * (n + 1) / lifter))`
kwargs : additional keyword arguments
Parameters to pass through to `mel_to_audio`
Returns
-------
y : np.ndarray [shape=(n)]
A time-domain signal reconstructed from `mfcc`
See Also
--------
mfcc_to_mel
mel_to_audio
feature.mfcc
core.griffinlim
scipy.fftpack.dct
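    Examples
    --------
    A minimal sketch (illustrative; the Griffin-Lim reconstruction is
    approximate and relatively slow):
    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=5, offset=10)
    >>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
    >>> y_inv = librosa.feature.inverse.mfcc_to_audio(mfcc, sr=sr)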
'''
mel_spec = mfcc_to_mel(mfcc, n_mels=n_mels, dct_type=dct_type, norm=norm,
ref=ref, lifter=lifter)
return mel_to_audio(mel_spec, **kwargs)
| isc |
dpshelio/sunpy | examples/time_series/goes_hek_m25.py | 1 | 1593 | """
==============================
Flare times on a GOES XRS plot
==============================
How to plot flare times as provided by the HEK on a GOES XRS plot.
"""
import matplotlib.pyplot as plt
from sunpy.timeseries import TimeSeries
from sunpy.time import TimeRange, parse_time
from sunpy.net import hek, Fido, attrs as a
###############################################################################
# Let's first grab GOES XRS data for a particular time of interest
tr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])
results = Fido.search(a.Time(tr), a.Instrument('XRS'))
###############################################################################
# Then download the data and load it into a TimeSeries
files = Fido.fetch(results)
goes = TimeSeries(files)
###############################################################################
# Next lets grab the HEK flare data for this time from the NOAA Space Weather
# Prediction Center (SWPC)
client = hek.HEKClient()
flares_hek = client.search(hek.attrs.Time(tr.start, tr.end),
hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')
###############################################################################
# Lets plot everything together
fig, ax = plt.subplots()
goes.plot()
ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)
ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,
parse_time(flares_hek[0].get('event_endtime')).plot_date,
alpha=0.2, label=flares_hek[0].get('fl_goescls'))
ax.legend(loc=2)
ax.set_yscale('log')
plt.show()
| bsd-2-clause |
doged/electrum-doged | plugins/plot.py | 4 | 3822 | from PyQt4.QtGui import *
from electrum_doged.plugins import BasePlugin, hook
from electrum_doged.i18n import _
import datetime
from electrum_doged.util import format_satoshis
try:
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
flag_matlib=True
except:
flag_matlib=False
class Plugin(BasePlugin):
def is_available(self):
if flag_matlib:
return True
else:
return False
@hook
def init_qt(self, gui):
self.win = gui.main_window
@hook
def export_history_dialog(self, d,hbox):
self.wallet = d.wallet
history = self.wallet.get_history()
if len(history) > 0:
b = QPushButton(_("Preview plot"))
hbox.addWidget(b)
b.clicked.connect(lambda: self.do_plot(self.wallet, history))
else:
b = QPushButton(_("No history to plot"))
hbox.addWidget(b)
def do_plot(self, wallet, history):
balance_Val=[]
fee_val=[]
value_val=[]
datenums=[]
unknown_trans = 0
pending_trans = 0
counter_trans = 0
balance = 0
for item in history:
tx_hash, confirmations, value, timestamp = item
balance += value
if confirmations:
if timestamp is not None:
try:
datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
balance_string = format_satoshis(balance, False)
balance_Val.append(float((format_satoshis(balance,False)))*1000.0)
                    except (RuntimeError, TypeError, NameError) as reason:
unknown_trans += 1
pass
else:
unknown_trans += 1
else:
pending_trans += 1
value_string = format_satoshis(value, True)
value_val.append(float(value_string)*1000.0)
if tx_hash:
label, is_default_label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
x=19
test11="Unknown transactions = "+str(unknown_trans)+" Pending transactions = "+str(pending_trans)+" ."
box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
box1.set_text(test11)
box = HPacker(children=[box1],
align="center",
pad=0.1, sep=15)
anchored_box = AnchoredOffsetbox(loc=3,
child=box, pad=0.5,
frameon=True,
bbox_to_anchor=(0.5, 1.02),
bbox_transform=ax.transAxes,
borderpad=0.5,
)
ax.add_artist(anchored_box)
plt.ylabel('mDOGED')
plt.xlabel('Dates')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].plot(datenums,balance_Val,marker='o',linestyle='-',color='blue',label='Balance')
axarr[0].legend(loc='upper left')
axarr[0].set_title('History Transactions')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[1].plot(datenums,value_val,marker='o',linestyle='-',color='green',label='Value')
axarr[1].legend(loc='upper left')
# plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
plt.show()
| gpl-3.0 |
IntelLabs/hpat | examples/series/series_corr.py | 1 | 1773 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
from numba import njit
@njit
def series_corr():
s1 = pd.Series([3.2, -10, np.nan, 0.23, 9.2])
s2 = pd.Series([5., 0, 3.3, np.nan, 9.2])
return s1.corr(s2) # Expect value: 0.98673...
print(series_corr())
| bsd-2-clause |
boddulavineela/mase | python101/code/zipf.py | 14 | 1453 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import sys
import string
import matplotlib.pyplot as pyplot
from analyze_book import *
def rank_freq(hist):
"""Returns a list of tuples where each tuple is a rank
and the number of times the item with that rank appeared.
"""
# sort the list of frequencies in decreasing order
freqs = hist.values()
freqs.sort(reverse=True)
# enumerate the ranks and frequencies
rf = [(r+1, f) for r, f in enumerate(freqs)]
return rf
def print_ranks(hist):
"""Prints the rank vs. frequency data."""
for r, f in rank_freq(hist):
print r, f
def plot_ranks(hist, scale='log'):
"""Plots frequency vs. rank."""
t = rank_freq(hist)
rs, fs = zip(*t)
pyplot.clf()
pyplot.xscale(scale)
pyplot.yscale(scale)
pyplot.title('Zipf plot')
pyplot.xlabel('rank')
pyplot.ylabel('frequency')
pyplot.plot(rs, fs, 'r-')
pyplot.show()
def main(name, filename='emma.txt', flag='plot', *args):
hist = process_file(filename, skip_header=True)
# either print the results or plot them
if flag == 'print':
print_ranks(hist)
elif flag == 'plot':
plot_ranks(hist)
else:
print 'Usage: zipf.py filename [print|plot]'
if __name__ == '__main__':
main(*sys.argv)
| unlicense |
saiwing-yeung/scikit-learn | examples/ensemble/plot_isolation_forest.py | 65 | 2363 | """
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of abnormality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path lengths
for particular samples, they are highly likely to be anomalies.
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
btabibian/scikit-learn | sklearn/utils/estimator_checks.py | 2 | 65609 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter, _num_samples
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in
["MultinomialNB", "LabelPropagation", "LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the Estimator targets are not NaN.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(estimator, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
estimator.fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised error as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, estimator):
for check in _yield_non_meta_checks(name, estimator):
yield check
if isinstance(estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, estimator):
yield check
if isinstance(estimator, RegressorMixin):
for check in _yield_regressor_checks(name, estimator):
yield check
if isinstance(estimator, TransformerMixin):
for check in _yield_transformer_checks(name, estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(name, estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
    Classes currently have some additional tests that relate to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
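    Examples
    --------
    A minimal usage sketch (illustrative; ``LinearSVC`` is just an arbitrary
    built-in estimator here):
    >>> from sklearn.utils.estimator_checks import check_estimator
    >>> from sklearn.svm import LinearSVC
    >>> check_estimator(LinearSVC)  # runs the full suite, raises on failure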
"""
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
check_no_fit_attributes_set_in_init(name, Estimator)
estimator = Estimator()
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
for check in _yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in ['Scaler', 'StandardScaler']:
estimator = clone(estimator).set_params(with_mean=False)
else:
estimator = clone(estimator)
# fit and predict
try:
with ignore_warnings(category=DeprecationWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
if has_fit_parameter(estimator_orig, "sample_weight"):
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(estimator, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
# this estimator raises
# ValueError: Found array with 0 feature(s) (shape=(23, 0))
# while a minimum of 1 is required.
# error
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
@ignore_warnings(category=DeprecationWarning)
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
@ignore_warnings(category=DeprecationWarning)
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, estimator_orig):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, estimator_orig):
    # check fitting a 1d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=DeprecationWarning)
def check_transformer_general(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, transformer, X, y)
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=DeprecationWarning)
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
@ignore_warnings(category=DeprecationWarning)
def check_transformers_unfitted(name, transformer):
X, y = _boston_subset()
transformer = clone(transformer)
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, transformer_orig, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred, x_pred2, atol=1e-2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer)
assert_allclose_dense_sparse(
x_pred, x_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
else:
assert_allclose_dense_sparse(
X_pred, X_pred2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer, atol=1e-2)
assert_allclose_dense_sparse(
X_pred, X_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
assert_equal(_num_samples(X_pred2), n_samples)
assert_equal(_num_samples(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(e, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_nan_inf(name, estimator_orig):
# Checks that Estimator X's do not contain NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, estimator)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
estimator = clone(estimator_orig)
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
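# Illustrative sketch, added for clarity (not part of the original check
# suite): the pickle round-trip idea above on a single concrete estimator.
# The choice of LinearRegression is an assumption for demonstration only.
def _demo_pickle_roundtrip():
    import pickle
    import numpy as np
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    X, y = rng.rand(20, 3), rng.rand(20)
    est = LinearRegression().fit(X, y)
    est_unpickled = pickle.loads(pickle.dumps(est))
    # predictions must survive the round trip unchanged
    assert np.allclose(est.predict(X), est_unpickled.predict(X))
    return est_unpickled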
@ignore_warnings(category=DeprecationWarning)
def check_estimators_partial_fit_n_features(name, estimator_orig):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(estimator_orig, 'partial_fit'):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
try:
if isinstance(estimator, ClassifierMixin):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, estimator.partial_fit, X[:, :-1], y)
@ignore_warnings(category=DeprecationWarning)
def check_clustering(name, clusterer_orig):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == 'AffinityPropagation':
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
assert_equal(clusterer.labels_.shape, (n_samples,))
pred = clusterer.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
@ignore_warnings(category=DeprecationWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
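# Illustrative sketch, added for clarity (not part of the original check
# suite): the compute_labels invariance above for MiniBatchKMeans, the
# estimator this check targets; the parameter values are demonstration
# choices only.
def _demo_compute_labels_invariance():
    import numpy as np
    from sklearn.cluster import MiniBatchKMeans
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=20, random_state=0)
    km = MiniBatchKMeans(n_clusters=3, random_state=0)
    pred1 = km.fit(X).predict(X)
    km.set_params(compute_labels=False)
    pred2 = km.fit(X).predict(X)
    return np.array_equal(pred1, pred2)   # expected: True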
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
classifier = clone(classifier_orig)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3 and
                        # LibSVM's one-vs-one decision_function works differently
not isinstance(classifier, BaseLibSVM)):
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_allclose(np.sum(y_prob, axis=1), np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=DeprecationWarning)
def check_estimators_fit_returns_self(name, estimator_orig):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = clone(estimator_orig)
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_2d(name, estimator_orig):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
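# Illustrative sketch, added for clarity (not part of the original check
# suite): capturing the column-vector-y warning pattern above for a single
# classifier. The choice of LogisticRegression is an assumption for
# demonstration only; no assertion is made about the exact warnings emitted.
def _demo_column_vector_y_warning():
    import warnings
    import numpy as np
    from sklearn.exceptions import DataConversionWarning
    from sklearn.linear_model import LogisticRegression
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    y = np.arange(10) % 3
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always", DataConversionWarning)
        LogisticRegression().fit(X, y[:, np.newaxis])
    return [w.category.__name__ for w in caught]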
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_classes(name, classifier_orig):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
classifier = clone(classifier_orig)
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=DeprecationWarning)
def check_regressors_int(name, regressor_orig):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=DeprecationWarning)
def check_regressors_train(name, regressor_orig):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, y)
rnd = np.random.RandomState(0)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
    # check that any decision_function / predict_proba methods present on
    # regressors raise a DeprecationWarning when called
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, X[:, 0])
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
@ignore_warnings(category=DeprecationWarning)
def check_class_weight_classifiers(name, classifier_orig):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(
class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
@ignore_warnings(category=DeprecationWarning)
def check_class_weight_balanced_classifiers(name, classifier_orig, X_train,
y_train, X_test, y_test, weights):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
@ignore_warnings(category=DeprecationWarning)
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(coef_balanced, coef_manual)
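# Illustrative sketch, added for clarity (not part of the original check
# suite): the manual re-weighting used above, i.e. the formula behind
# class_weight='balanced': n_samples / (n_classes * count(class)).
def _demo_balanced_class_weights():
    import numpy as np
    y = np.array([1, 1, 1, -1, -1])
    classes, counts = np.unique(y, return_counts=True)
    n_samples, n_classes = len(y), float(len(classes))
    # expected result here: {-1: 1.25, 1: 0.8333...}
    return {c: n_samples / (n_classes * n) for c, n in zip(classes, counts)}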
@ignore_warnings(category=DeprecationWarning)
def check_estimators_overwrite_params(name, estimator_orig):
X, y = make_blobs(random_state=0, n_samples=9)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
@ignore_warnings(category=DeprecationWarning)
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
# this check works on classes, not instances
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
@ignore_warnings(category=DeprecationWarning)
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=DeprecationWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_data_not_an_array(name, estimator_orig, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# this check works on classes, not instances
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(estimator, y):
    # MultiTask estimators raise ValueError if y is 1-D;
    # convert y to 2-D for those estimators.
if "MultiTask" in estimator.__class__.__name__:
return np.reshape(y, (-1, 1))
return y
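# Illustrative sketch, added for clarity (not part of the original check
# suite): what the conversion above does to a 1-D target vector.
def _demo_y_to_2d():
    import numpy as np
    y = np.arange(6)
    return np.reshape(y, (-1, 1)).shape   # (6, 1)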
@ignore_warnings(category=DeprecationWarning)
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that estimators that are not transformers and have a max_iter
    # parameter set the n_iter_ attribute to at least 1 after fitting.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = clone(estimator_orig).set_params(alpha=0.)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(estimator, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_transformer_n_iter(name, estimator_orig):
    # Test that transformers with a max_iter parameter set the
    # n_iter_ attribute to at least 1 after fitting.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
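# Illustrative sketch, added for clarity (not part of the original check
# suite): the shallow/deep get_params relationship above for a concrete
# composite estimator; the Pipeline choice is an assumption for
# demonstration only.
def _demo_get_params_subset():
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    pipe = make_pipeline(StandardScaler())
    shallow = pipe.get_params(deep=False)
    deep = pipe.get_params(deep=True)
    # every shallow item also appears among the deep items
    return all(item in deep.items() for item in shallow.items())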
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = clone(estimator_orig)
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = clone(estimator_orig)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
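# Illustrative sketch, added for clarity (not part of the original check
# suite): two score vectors with identical ranks, which is exactly the
# property asserted above via scipy.stats.rankdata. The values are made up
# for demonstration.
def _demo_rank_agreement():
    import numpy as np
    from scipy.stats import rankdata
    proba = np.array([0.1, 0.4, 0.35, 0.8])
    decision = np.array([-2.2, -0.4, -0.6, 1.3])
    return np.array_equal(rankdata(proba), rankdata(decision))   # True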
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/mpl_toolkits/axisartist/floating_axes.py | 18 | 22796 | """
Experimental support for curvilinear grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
# TODO :
# *. see if tick_iterator method can be simplified by reusing the parent method.
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from . import grid_helper_curvelinear
class FloatingAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper):
pass
class FixedAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper):
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
value, nth_coord = grid_helper.get_data_boundary(side) # return v= 0 , nth=1, extremes of the other coordinate.
super(FixedAxisArtistHelper, self).__init__(grid_helper,
nth_coord,
value,
axis_direction=side,
)
#self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.value = value
self.grid_helper = grid_helper
self._side = side
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
self.grid_info = self.grid_helper.grid_info
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs, lat_levs = np.asarray(lon_levs), np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.001 / lat_factor
else:
yy0 = lat_levs
dy = 0.001
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.001 / lon_factor
else:
xx0 = lon_levs
dx = 0.001
_extremes = self.grid_helper._extremes
xmin, xmax = sorted(_extremes[:2])
ymin, ymax = sorted(_extremes[2:])
if self.nth_coord == 0:
mask = (ymin <= yy0) & (yy0 <= ymax)
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (xmin <= xx0) & (xx0 <= xmax)
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
#yy0_ = yy0.copy()
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>xmax] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
yy00 = yy0.copy()
yy00[yy0+dy>ymax] -= dy
xx2a, yy2a = transform_xy(xx0, yy00)
xx2b, yy2b = transform_xy(xx0, yy00+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
#xx0_ = xx0.copy()
xx1, yy1 = transform_xy(xx0, yy0)
yy00 = yy0.copy()
yy00[yy0+dy>ymax] -= dy
xx1a, yy1a = transform_xy(xx0, yy00)
xx1b, yy1b = transform_xy(xx0, yy00+dy)
xx00 = xx0.copy()
xx00[xx0+dx>xmax] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
#_mod = (d2-d1+180)%360
#if _mod < 180:
# d1 += 180
##_div, _mod = divmod(d2-d1, 360)
yield [x, y], d1, d2, lab
#, d2/3.14159*180.+da)
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
from matplotlib.path import Path
k, v = dict(left=("lon_lines0", 0),
right=("lon_lines0", 1),
bottom=("lat_lines0", 0),
top=("lat_lines0", 1))[self._side]
xx, yy = self.grid_info[k][v]
return Path(list(zip(xx, yy)))
from .grid_finder import ExtremeFinderSimple
class ExtremeFinderFixed(ExtremeFinderSimple):
def __init__(self, extremes):
self._extremes = extremes
def __call__(self, transform_xy, x1, y1, x2, y2):
"""
get extreme values.
x1, y1, x2, y2 in image coordinates (0-based)
nx, ny : number of division in each axis
"""
#lon_min, lon_max, lat_min, lat_max = self._extremes
return self._extremes
class GridHelperCurveLinear(grid_helper_curvelinear.GridHelperCurveLinear):
def __init__(self, aux_trans, extremes,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
(inverse transform should be defined) or a tuple of two callable
objects which defines the transform and its inverse. The callables
need take two arguments of array of source coordinates and
should return two target coordinates:
e.g., x2, y2 = trans(x1, y1)
"""
self._old_values = None
self._extremes = extremes
extreme_finder = ExtremeFinderFixed(extremes)
super(GridHelperCurveLinear, self).__init__(aux_trans,
extreme_finder,
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1,
tick_formatter2=tick_formatter2)
# def update_grid_finder(self, aux_trans=None, **kw):
# if aux_trans is not None:
# self.grid_finder.update_transform(aux_trans)
# self.grid_finder.update(**kw)
# self.invalidate()
# def _update(self, x1, x2, y1, y2):
# "bbox in 0-based image coordinates"
# # update wcsgrid
# if self.valid() and self._old_values == (x1, x2, y1, y2):
# return
# self._update_grid(x1, y1, x2, y2)
# self._old_values = (x1, x2, y1, y2)
# self._force_update = False
def get_data_boundary(self, side):
"""
return v= 0 , nth=1
"""
lon1, lon2, lat1, lat2 = self._extremes
return dict(left=(lon1, 0),
right=(lon2, 0),
bottom=(lat1, 1),
top=(lat2, 1))[side]
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
nth_coord_ticks=nth_coord)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
return axisline
# new_floating_axis will inherit the grid_helper's extremes.
# def new_floating_axis(self, nth_coord,
# value,
# axes=None,
# axis_direction="bottom"
# ):
# axis = super(GridHelperCurveLinear,
# self).new_floating_axis(nth_coord,
# value, axes=axes,
# axis_direction=axis_direction)
# # set extreme values of the axis helper
# if nth_coord == 1:
# axis.get_helper().set_extremes(*self._extremes[:2])
# elif nth_coord == 0:
# axis.get_helper().set_extremes(*self._extremes[2:])
# return axis
def _update_grid(self, x1, y1, x2, y2):
#self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
if self.grid_info is None:
self.grid_info = dict()
grid_info = self.grid_info
grid_finder = self.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
lon_min, lon_max = sorted(extremes[:2])
lat_min, lat_max = sorted(extremes[2:])
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = lon_min, lon_max, lat_min, lat_max #extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
if lon_factor is None:
lon_values = np.asarray(lon_levs[:lon_n])
else:
lon_values = np.asarray(lon_levs[:lon_n]/lon_factor)
if lat_factor is None:
lat_values = np.asarray(lat_levs[:lat_n])
else:
lat_values = np.asarray(lat_levs[:lat_n]/lat_factor)
lon_values0 = lon_values[(lon_min<lon_values) & (lon_values<lon_max)]
lat_values0 = lat_values[(lat_min<lat_values) & (lat_values<lat_max)]
lon_lines, lat_lines = grid_finder._get_raw_grid_lines(lon_values0,
lat_values0,
lon_min, lon_max,
lat_min, lat_max)
grid_info["lon_lines"] = lon_lines
grid_info["lat_lines"] = lat_lines
lon_lines, lat_lines = grid_finder._get_raw_grid_lines(extremes[:2],
extremes[2:],
*extremes)
#lon_min, lon_max,
# lat_min, lat_max)
grid_info["lon_lines0"] = lon_lines
grid_info["lat_lines0"] = lat_lines
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon_lines"]:
grid_lines.extend([gl])
if axis in ["both", "y"]:
for gl in self.grid_info["lat_lines"]:
grid_lines.extend([gl])
return grid_lines
def get_boundary(self):
"""
return Nx2 array of x,y coordinate of the boundary
"""
x0, x1, y0, y1 = self._extremes
tr = self._aux_trans
xx = np.linspace(x0, x1, 100)
yy0, yy1 = np.empty_like(xx), np.empty_like(xx)
yy0.fill(y0)
yy1.fill(y1)
yy = np.linspace(y0, y1, 100)
xx0, xx1 = np.empty_like(yy), np.empty_like(yy)
xx0.fill(x0)
xx1.fill(x1)
xxx = np.concatenate([xx[:-1], xx1[:-1], xx[-1:0:-1], xx0])
yyy = np.concatenate([yy0[:-1], yy[:-1], yy1[:-1], yy[::-1]])
t = tr.transform(np.array([xxx, yyy]).transpose())
return t
class FloatingAxesBase(object):
def __init__(self, *kl, **kwargs):
grid_helper = kwargs.get("grid_helper", None)
if grid_helper is None:
raise ValueError("FloatingAxes requires grid_helper argument")
if not hasattr(grid_helper, "get_boundary"):
raise ValueError("grid_helper must implement get_boundary method")
self._axes_class_floating.__init__(self, *kl, **kwargs)
self.set_aspect(1.)
self.adjust_axes_lim()
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
import matplotlib.patches as mpatches
grid_helper = self.get_grid_helper()
t = grid_helper.get_boundary()
return mpatches.Polygon(t)
def cla(self):
self._axes_class_floating.cla(self)
#HostAxes.cla(self)
self.patch.set_transform(self.transData)
patch = self._axes_class_floating._gen_axes_patch(self)
patch.set_figure(self.figure)
patch.set_visible(False)
patch.set_transform(self.transAxes)
self.patch.set_clip_path(patch)
self.gridlines.set_clip_path(patch)
self._original_patch = patch
def adjust_axes_lim(self):
#t = self.get_boundary()
grid_helper = self.get_grid_helper()
t = grid_helper.get_boundary()
x, y = t[:,0], t[:,1]
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
dx = (xmax-xmin)/100.
dy = (ymax-ymin)/100.
self.set_xlim(xmin-dx, xmax+dx)
self.set_ylim(ymin-dy, ymax+dy)
_floatingaxes_classes = {}
def floatingaxes_class_factory(axes_class):
new_class = _floatingaxes_classes.get(axes_class)
if new_class is None:
new_class = type(str("Floating %s" % (axes_class.__name__)),
(FloatingAxesBase, axes_class),
{'_axes_class_floating': axes_class})
_floatingaxes_classes[axes_class] = new_class
return new_class
from .axislines import Axes
from mpl_toolkits.axes_grid1.parasite_axes import host_axes_class_factory
FloatingAxes = floatingaxes_class_factory(host_axes_class_factory(Axes))
import matplotlib.axes as maxes
FloatingSubplot = maxes.subplot_class_factory(FloatingAxes)
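# Illustrative usage sketch, added for clarity (not part of the original
# module): building a floating polar sector with the classes defined above.
# The extremes, locator and formatter choices are demonstration assumptions
# only; see curvelinear_test3/curvelinear_test4 below for the module's own
# examples.
def _demo_floating_polar_sector(fig):
    from matplotlib.projections import PolarAxes
    from . import angle_helper
    from .grid_finder import FixedLocator
    tr = Affine2D().scale(np.pi / 180., 1.) + PolarAxes.PolarTransform()
    grid_helper = GridHelperCurveLinear(
        tr, extremes=(0, 90, 10, 0),
        grid_locator1=angle_helper.LocatorDMS(5),
        grid_locator2=FixedLocator([2, 4, 6, 8, 10]),
        tick_formatter1=angle_helper.FormatterDMS())
    ax = FloatingSubplot(fig, 111, grid_helper=grid_helper)
    fig.add_subplot(ax)
    return ax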
# def test(fig):
# from mpl_toolkits.axes_grid.axislines import Subplot
# ax = Subplot(fig, 111)
# fig.add_subplot(ax)
# plt.draw()
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
grid_locator1 = angle_helper.LocatorDMS(15)
# Find a grid values appropriate for the coordinate (degree,
# minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
# And also uses an appropriate formatter. Note that,the
# acceptable Locator and Formatter class is a bit different than
# that of mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but may be possible in the future).
from .grid_finder import FixedLocator
grid_locator2 = FixedLocator([2, 4, 6, 8, 10])
grid_helper = GridHelperCurveLinear(tr,
extremes=(0, 360, 10, 3),
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1,
tick_formatter2=None,
)
ax1 = FloatingSubplot(fig, 111, grid_helper=grid_helper)
#ax1.axis["top"].set_visible(False)
#ax1.axis["bottom"].major_ticklabels.set_axis_direction("top")
fig.add_subplot(ax1)
#ax1.grid(True)
r_scale = 10.
tr2 = Affine2D().scale(1., 1./r_scale) + tr
grid_locator2 = FixedLocator([30, 60, 90])
grid_helper2 = GridHelperCurveLinear(tr2,
extremes=(0, 360,
10.*r_scale, 3.*r_scale),
grid_locator2=grid_locator2,
)
ax1.axis["right"] = axis = grid_helper2.new_fixed_axis("right", axes=ax1)
ax1.axis["left"].label.set_text("Test 1")
ax1.axis["right"].label.set_text("Test 2")
for an in [ "left", "right"]:
ax1.axis[an].set_visible(False)
#grid_helper2 = ax1.get_grid_helper()
ax1.axis["z"] = axis = grid_helper.new_floating_axis(1, 7,
axes=ax1,
axis_direction="bottom")
axis.toggle(all=True, label=True)
#axis.label.set_axis_direction("top")
axis.label.set_text("z = ?")
axis.label.set_visible(True)
axis.line.set_color("0.5")
#axis.label.set_visible(True)
ax2 = ax1.get_aux_axes(tr)
xx, yy = [67, 90, 75, 30], [2, 5, 8, 4]
ax2.scatter(xx, yy)
l, = ax2.plot(xx, yy, "k-")
l.set_clip_path(ax1.patch)
def curvelinear_test4(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
grid_locator1 = angle_helper.LocatorDMS(5)
tick_formatter1 = angle_helper.FormatterDMS()
from .grid_finder import FixedLocator
grid_locator2 = FixedLocator([2, 4, 6, 8, 10])
grid_helper = GridHelperCurveLinear(tr,
extremes=(120, 30, 10, 0),
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1,
tick_formatter2=None,
)
ax1 = FloatingSubplot(fig, 111, grid_helper=grid_helper)
#ax1.axis["top"].set_visible(False)
#ax1.axis["bottom"].major_ticklabels.set_axis_direction("top")
fig.add_subplot(ax1)
#ax1.grid(True)
ax1.axis["left"].label.set_text("Test 1")
ax1.axis["right"].label.set_text("Test 2")
for an in [ "top"]:
ax1.axis[an].set_visible(False)
#grid_helper2 = ax1.get_grid_helper()
ax1.axis["z"] = axis = grid_helper.new_floating_axis(1, 70,
axes=ax1,
axis_direction="bottom")
axis.toggle(all=True, label=True)
axis.label.set_axis_direction("top")
axis.label.set_text("z = ?")
axis.label.set_visible(True)
axis.line.set_color("0.5")
#axis.label.set_visible(True)
ax2 = ax1.get_aux_axes(tr)
xx, yy = [67, 90, 75, 30], [2, 5, 8, 4]
ax2.scatter(xx, yy)
l, = ax2.plot(xx, yy, "k-")
l.set_clip_path(ax1.patch)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test(fig)
#curvelinear_test1(fig)
curvelinear_test4(fig)
#plt.draw()
plt.show()
| gpl-2.0 |
thilbern/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi
# partition.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
sgdecker/pygempak | gempak.py | 1 | 3626 | # A Module for Accessing GEMPAK Data from Python
# Copyright 2015 Steven G. Decker
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import gempakf as gp
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
class Dataset:
def __init__(self, gemfile):
self.gemfile = gemfile
n = gp.gemread.get_num_grids(gemfile)
# Local variables
gdattm = np.zeros((20,n,2), np.int8, 'F')
level = np.zeros((n,2), np.int32, 'F')
ivcord = np.zeros(n, np.int32)
vcord = np.zeros((4,n), np.int8, 'F')
parm = np.zeros((12,n), np.int8, 'F')
self.max_grids, self.num_grids, self.nx, self.ny, self.proj, \
self.ang, self.lllat, self.lllon, self.urlat, self.urlon = \
gp.gemread.ggi(gemfile, gdattm, level, ivcord, vcord, parm)
self.proj = self.proj.strip()
self.datainfo = []
for i in range(self.num_grids):
dattim = [gdattm[:,i,0].view('a20')[0].strip(), \
gdattm[:,i,1].view('a20')[0].strip()]
lev = [level[i,0], level[i,1]]
vc = vcord[:,i].view('a4')[0].strip()
fun = parm[:,i].view('a12')[0].strip()
datarow = {'gdattim': dattim, 'glevel': lev, 'gvcord': vc,
'gfunc': fun}
self.datainfo.append(datarow)
def grid_from_num(self, num):
grid = np.zeros((self.nx,self.ny), np.float32, 'F')
gp.gemread.read_grid(self.gemfile, \
self.datainfo[num]['gdattim'][0], \
self.datainfo[num]['gdattim'][1], \
self.datainfo[num]['glevel'][0], \
self.datainfo[num]['glevel'][1], \
self.datainfo[num]['gvcord'], \
self.datainfo[num]['gfunc'], grid)
return grid.transpose()
def grid_from_dict(self, d):
grid = np.zeros((self.nx,self.ny), np.float32, 'F')
        gp.gemread.read_grid(self.gemfile, d['gdattim'][0], d['gdattim'][1],
d['glevel'][0], d['glevel'][1], d['gvcord'],
d['gfunc'], grid)
return grid.transpose()
def map_for_dataset(dset, res='l'):
if dset.proj=='LCC':
m = Basemap(llcrnrlon=dset.lllon, llcrnrlat=dset.lllat,
urcrnrlon=dset.urlon, urcrnrlat = dset.urlat,
projection='lcc', lat_1=dset.ang[0], lat_2=dset.ang[2],
lon_0=dset.ang[1], resolution=res)
else:
print 'Sorry, this projection is not yet supported. :-('
m = 0
return m
if __name__ == "__main__":
gemdata = Dataset('nam211.gem')
print gemdata.datainfo[0]
arr = gemdata.grid_from_dict(gemdata.datainfo[10])
m = map_for_dataset(gemdata)
m.drawcountries()
m.drawcoastlines()
m.drawstates()
x = np.linspace(m.xmin,m.xmax,gemdata.nx)
y = np.linspace(m.ymin,m.ymax,gemdata.ny)
xmesh, ymesh = np.meshgrid(x, y)
m.contourf(xmesh,ymesh,arr)
plt.show()
| apache-2.0 |
iismd17/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
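# Illustrative sketch, added for demonstration (not part of the original
# tests): every score listed above is invariant under a permutation of the
# predicted label values, so a relabelled-but-identical clustering scores 1.0.
def _demo_label_permutation_invariance():
    labels_true = [0, 0, 1, 1, 2, 2]
    labels_perm = [2, 2, 0, 0, 1, 1]
    return [score_func(labels_true, labels_perm)
            for score_func in score_funcs]   # every entry equals 1.0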
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
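# Added reference note (not part of the original test suite): in the
# scikit-learn release these expected values target, AMI is normalized as
#
#     AMI = (MI - E[MI]) / (max(H(labels_a), H(labels_b)) - E[MI])
#
# which is consistent with the numbers asserted above: (0.41022 - 0.15042)
# divided by (max(H(labels_a), H(labels_b)) - 0.15042) gives roughly 0.275.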
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
arjoly/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
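# --- Added usage note (not part of the original example): after fitting,
# GridSearchCV also exposes the selected hyper-parameters directly, which can
# be printed alongside the plot above.
print("Best parameters found:", estimator.best_params_)
print("Best cross-validation score: %.3f" % estimator.best_score_)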
| bsd-3-clause |
gfyoung/pandas | pandas/core/frame.py | 1 | 332011 | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
from io import StringIO
import itertools
import mmap
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib, properties
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
AnyArrayLike,
ArrayLike,
Axes,
Axis,
ColspaceArgType,
CompressionOptions,
Dtype,
FilePathOrBuffer,
FloatFormatType,
FormattersType,
FrameOrSeriesUnion,
IndexKeyFunc,
IndexLabel,
Level,
Manager,
PythonFuncType,
Renamer,
StorageOptions,
Suffixes,
ValueKeyFunc,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_2d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, generic, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.aggregation import reconstruct_func, relabel_result, transform
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import extract_array, sanitize_masked_array
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import ArrayManager, BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
mgr_to_mgr,
nested_data_to_arrays,
reorder_arrays,
sanitize_index,
to_arrays,
treat_as_nested,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.sorting import get_group_index, lexsort_indexer, nargsort
from pandas.io.common import get_handle
from pandas.io.formats import console, format as fmt
from pandas.io.formats.info import BaseInfo, DataFrameInfo
import pandas.plotting
if TYPE_CHECKING:
from typing import Literal
from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.resample import Resampler
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"inplace": """
inplace : boolean, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
>>> df1
a b
0 foo 1
1 bar 2
>>> df2
a c
0 foo 3
1 baz 4
>>> df1.merge(df2, how='inner', on='a')
a b c
0 foo 1 3
>>> df1.merge(df2, how='left', on='a')
a b c
0 foo 1 3.0
1 bar 2 NaN
>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
>>> df2 = pd.DataFrame({'right': [7, 8]})
>>> df1
left
0 foo
1 bar
>>> df2
right
0 7
1 8
>>> df1.merge(df2, how='cross')
left right
0 foo 7
1 foo 8
2 bar 7
3 bar 8
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame, OpsMixin):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
Constructing DataFrame from dataclass:
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
>>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
x y
0 0 0
1 0 3
2 2 3
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
_HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)
@property
def _constructor(self) -> Type[DataFrame]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
# GH#31549 raising NotImplementedError on a property causes trouble
# for `inspect`
def constructor(*args, **kwargs):
raise NotImplementedError("Not supported for DataFrames!")
return constructor
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, (BlockManager, ArrayManager)):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
data = sanitize_masked_array(data)
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif is_list_like(data):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if treat_as_nested(data):
arrays, columns, index = nested_data_to_arrays(
data, columns, index, dtype
)
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if is_extension_array_dtype(dtype):
# TODO(EA2D): special case not needed with 2D EAs
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)
else:
values = construct_2d_arraylike_from_scalar(
data, len(index), len(columns), dtype, copy
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
# ensure correct Manager type according to settings
manager = get_option("mode.data_manager")
mgr = mgr_to_mgr(mgr, typ=manager)
NDFrame.__init__(self, mgr)
def _as_manager(self, typ: str) -> DataFrame:
"""
Private helper function to create a DataFrame with specific manager.
Parameters
----------
typ : {"block", "array"}
Returns
-------
DataFrame
New DataFrame using specified manager type. Is not guaranteed
to be a copy or not.
"""
new_mgr: Manager
new_mgr = mgr_to_mgr(self._mgr, typ=typ)
# fastpath of passing a manager doesn't check the option/manager class
return DataFrame(new_mgr)
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape : Tuple of array dimensions.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if isinstance(self._mgr, ArrayManager):
return len({arr.dtype for arr in self._mgr.arrays}) == 1
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if isinstance(self._mgr, ArrayManager):
return False
if self._mgr.any_extension_types:
# TODO(EA2D) special case would be unnecessary with 2D EAs
return False
return len(self._mgr.blocks) == 1
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
In case of non-interactive session, no boundaries apply.
`ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if max_rows is not None: # unlimited rows
# min of two, where one may be None
d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(line) for line in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
)
return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.FormattersType] = None,
float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
)
return fmt.DataFrameRenderer(formatter).to_string(
buf=buf,
encoding=encoding,
line_width=line_width,
)
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Hashable, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Hashable, Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Hashable, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
# https://github.com/python/mypy/issues/9046
# error: namedtuple() expects a string literal as the first argument
itertuple = collections.namedtuple( # type: ignore[misc]
name, fields, rename=True
)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
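    # Added illustrative note (not part of pandas itself): the positional
    # renaming described in the docstring above looks like this --
    #
    #     >>> df = pd.DataFrame({"my col": [1], "2nd": [3]})
    #     >>> next(df.itertuples())
    #     Pandas(Index=0, _1=1, _2=3)
    #
    # "my col" and "2nd" are not valid Python identifiers, so namedtuple's
    # ``rename=True`` replaces them with positional names.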
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
# pandas/core/frame.py:1146: error: Overloaded function signatures 1 and 2
# overlap with incompatible return types [misc]
@overload
def dot(self, other: Series) -> Series: # type: ignore[misc]
...
@overload
def dot(self, other: Union[DataFrame, Index, ArrayLike]) -> DataFrame:
...
def dot(self, other: Union[AnyArrayLike, FrameOrSeriesUnion]) -> FrameOrSeriesUnion:
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame of a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
Note that the dot method give the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method works also if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right._values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return self._constructor_sliced(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
@overload
def __matmul__(self, other: Series) -> Series:
...
@overload
def __matmul__(
self, other: Union[AnyArrayLike, FrameOrSeriesUnion]
) -> FrameOrSeriesUnion:
...
def __matmul__(
self, other: Union[AnyArrayLike, FrameOrSeriesUnion]
) -> FrameOrSeriesUnion:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
try:
return self.T.dot(np.transpose(other)).T
except ValueError as err:
if "shape mismatch" not in str(err):
raise
# GH#21581 give exception message for original shapes
msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
raise ValueError(msg) from err
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(
self, dtype=None, copy: bool = False, na_value=lib.no_default
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
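    # Added illustrative note (not part of pandas itself): ``na_value`` fills
    # missing entries in the resulting array, e.g.
    #
    #     >>> pd.DataFrame({"A": [1.0, None]}).to_numpy(na_value=0.0)
    #     array([[1.],
    #            [0.]])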
def to_dict(self, orient: str = "dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c((k, v.tolist()) for k, v in self.items())
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "series":
return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table: str,
project_id: Optional[str] = None,
chunksize: Optional[int] = None,
reauth: bool = False,
if_exists: str = "fail",
auth_local_webserver: bool = False,
table_schema: Optional[List[Dict[str, str]]] = None,
location: Optional[str] = None,
progress_bar: bool = True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
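    # Added usage sketch (not part of pandas; the dataset, table and project
    # names below are hypothetical, and configured Google Cloud credentials
    # are assumed):
    #
    #     >>> df = pd.DataFrame({"my_string": list("abc"), "my_int": [1, 2, 3]})
    #     >>> df.to_gbq("my_dataset.my_table",
    #     ...           project_id="my-project",
    #     ...           if_exists="append")  # doctest: +SKIP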
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float: bool = False,
nrows=None,
) -> DataFrame:
"""
Convert structured or record ndarray to DataFrame.
Creates a DataFrame object from a structured ndarray, sequence of
tuples or dicts, or DataFrame.
Parameters
----------
data : structured ndarray, sequence of tuples or dicts, or DataFrame
Structured input data.
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_dict : DataFrame from dict of array-like or dicts.
DataFrame : DataFrame object creation using constructor.
Examples
--------
Data can be provided as a structured ndarray:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of dicts:
>>> data = [{'col_1': 3, 'col_2': 'a'},
... {'col_1': 2, 'col_2': 'b'},
... {'col_1': 1, 'col_2': 'c'},
... {'col_1': 0, 'col_2': 'd'}]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of tuples with corresponding columns:
>>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
>>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns_list = []
for k, v in data.items():
if k in columns:
arr_columns_list.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns)
if coerce_float:
for i, arr in enumerate(arrays):
if arr.dtype == object:
arrays[i] = lib.maybe_convert_objects(arr, try_float=True)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols; this necessarily copies the data
ix_vals = list(map(np.array, zip(*self.index._values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
count = 0
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns), whichever is applicable.
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
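# Example (illustrative sketch): per the dtype-mapping logic above, the
# ``column_dtypes``/``index_dtypes`` dictionaries may also be keyed by
# zero-indexed position instead of by name, e.g.
#
#     df.to_records(column_dtypes={0: "int32"})   # first column stored as int32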
@classmethod
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Optional[Dtype] = None,
verify_integrity: bool = True,
) -> DataFrame:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are actual arrays, stored as they will
be in a block (numpy ndarray or ExtensionArray), that they have the
same length as and are aligned with the index, and that `columns`
and `index` are already Index objects.
Returns
-------
DataFrame
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls(mgr)
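# Example (illustrative sketch of this internal helper, not public API):
#
#     DataFrame._from_arrays(
#         [np.array([1, 2]), np.array(["a", "b"])],
#         columns=["x", "y"],
#         index=[0, 1],
#     )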
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePathOrBuffer,
convert_dates: Optional[Dict[Hashable, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Hashable, str]] = None,
version: Optional[int] = 114,
convert_strl: Optional[Sequence[Hashable]] = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". Default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {{114, 117, 118, 119, None}}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
Version 119 should usually only be used when the number of
variables exceeds the capacity of dta format 118. Exporting
smaller datasets in format 119 may have unintended consequences,
and, as of November 2020, Stata SE cannot read version 119 files.
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies
compression mode. Compression mode must be one of {{'infer', 'gzip',
'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and
`path` is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression). If dict and compression mode is one of {{'zip',
'gzip', 'bz2'}}, or inferred as one of the above, other entries
passed as additional compression options.
.. versionadded:: 1.1.0
{storage_options}
.. versionadded:: 1.2.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriterUTF8 as statawriter,
)
kwargs: Dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
# mypy: Too many arguments for "StataWriter"
writer = statawriter( # type: ignore[call-arg]
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
compression=compression,
storage_options=storage_options,
**kwargs,
)
writer.write_file()
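# Example (illustrative sketch): writing a frame that has a datetime column
# with an explicit Stata date format; the column names used here are
# hypothetical:
#
#     df.to_stata('animals.dta', version=117,
#                 convert_dates={'observed': 'td'}, convert_strl=['animal'])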
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str or file-like object
If a string, it will be used as Root Directory path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
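# Example (illustrative sketch): the extra keywords are forwarded to
# pyarrow.feather.write_feather (pyarrow >= 0.17), e.g.
#
#     df.to_feather('df.feather', compression='zstd', compression_level=3)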
@doc(
Series.to_markdown,
klass=_shared_doc_kwargs["klass"],
storage_options=_shared_docs["storage_options"],
examples="""Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid"))
+----+------------+------------+
| | animal_1 | animal_2 |
+====+============+============+
| 0 | elk | dog |
+----+------------+------------+
| 1 | pig | quetzal |
+----+------------+------------+
""",
)
def to_markdown(
self,
buf: Optional[Union[IO[str], str]] = None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[str]:
if "showindex" in kwargs:
warnings.warn(
"'showindex' is deprecated. Only 'index' will be used "
"in a future version. Use 'index' to silence this warning.",
FutureWarning,
stacklevel=2,
)
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
kwargs.setdefault("showindex", index)
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
with get_handle(buf, mode, storage_options=storage_options) as handles:
assert not isinstance(handles.handle, (str, mmap.mmap))
handles.handle.writelines(result)
return None
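# Example (illustrative sketch): ``buf`` may also be a path, and ``index=False``
# omits the index column from the rendered table:
#
#     df.to_markdown('table.md', index=False)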
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: Optional[FilePathOrBuffer] = None,
engine: str = "auto",
compression: Optional[str] = "snappy",
index: Optional[bool] = None,
partition_cols: Optional[List[str]] = None,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[bytes]:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str or file-like object, default None
If a string, it will be used as Root Directory path
when writing a partitioned dataset. By file-like object,
we refer to objects with a write() method, such as a file handle
(e.g. via builtin open function) or io.BytesIO. The engine
fastparquet does not accept file-like objects. If path is None,
a bytes object is returned.
.. versionchanged:: 1.2.0
Previously this was "fname"
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
.. versionadded:: 0.24.0
{storage_options}
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
Returns
-------
bytes if no path argument is provided else None
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use a io.BytesIO
object, as long as you don't use partition_cols, which creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
return to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
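# Example (illustrative sketch): with ``partition_cols`` the path must be a
# string and is used as the root directory of a partitioned dataset:
#
#     df.to_parquet('dataset_root', partition_cols=['col1'])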
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[ColspaceArgType] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[FormattersType] = None,
float_format: Optional[FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: Union[bool, str] = False,
decimal: str = ".",
bold_rows: bool = True,
classes: Optional[Union[str, List, Tuple]] = None,
escape: bool = True,
notebook: bool = False,
border: Optional[int] = None,
table_id: Optional[str] = None,
render_links: bool = False,
encoding: Optional[str] = None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
escape=escape,
decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
# TODO: a generic formatter would be in DataFrameFormatter
return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
table_id=table_id,
render_links=render_links,
)
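# Example (illustrative sketch of the HTML-specific options documented above):
#
#     html = df.to_html(classes=['table', 'table-striped'],
#                       table_id='frame', render_links=True)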
# ----------------------------------------------------------------------
@Substitution(
klass="DataFrame",
type_sub=" and columns",
max_cols_sub=dedent(
"""\
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used."""
),
show_counts_sub=dedent(
"""\
show_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the DataFrame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
null_counts : bool, optional
.. deprecated:: 1.2.0
Use show_counts instead."""
),
examples_sub=dedent(
"""\
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Prints a summary of the column count and dtypes but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 165.9 MB"""
),
see_also_sub=dedent(
"""\
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns."""
),
version_added_sub="",
)
@doc(BaseInfo.render)
def info(
self,
verbose: Optional[bool] = None,
buf: Optional[IO[str]] = None,
max_cols: Optional[int] = None,
memory_usage: Optional[Union[bool, str]] = None,
show_counts: Optional[bool] = None,
null_counts: Optional[bool] = None,
) -> None:
if null_counts is not None:
if show_counts is not None:
raise ValueError("null_counts used with show_counts. Use show_counts.")
warnings.warn(
"null_counts is deprecated. Use show_counts instead",
FutureWarning,
stacklevel=2,
)
show_counts = null_counts
info = DataFrameInfo(
data=self,
memory_usage=memory_usage,
)
info.render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default; pass ``deep=True`` to include it:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 180000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
).append(result)
return result
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8.0
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, {})
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> DataFrame:
return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._mgr.iget(i)
result = self._box_col_values(values, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
indexer = lib.maybe_indices_to_slice(
indexer.astype(np.intp, copy=False), len(self)
)
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
# GH#26490 using data[key] can cause RecursionError
data = data._get_item_cache(key)
return data
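# Example (illustrative sketch of the dispatch implemented in __getitem__):
#
#     df['A']            # hashable key in columns -> Series
#     df[['A', 'B']]     # list of keys            -> DataFrame subset
#     df[0:2]            # slice (or partial date string) -> row slice
#     df[df['A'] > 0]    # 1d boolean indexer      -> row mask
#     df[df > 0]         # boolean DataFrame       -> self.where(key)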
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : bool, default False
Interpret the index/col as indexers.
Returns
-------
scalar
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
try:
loc = engine.get_loc(index)
return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(value, DataFrame):
self._set_item_frame_value(key, value)
elif is_list_like(value) and 1 < len(
self.columns.get_indexer_for([key])
) == len(value):
# Column to set is duplicated
self._setitem_array([key], value)
else:
# set column
self._set_item(key, value)
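# Example (illustrative sketch of the dispatch implemented in __setitem__):
#
#     df['C'] = 1              # set / add a single column (_set_item)
#     df[0:2] = 0              # positional row slice      (_setitem_slice)
#     df[['A', 'B']] = other   # list-like key             (_setitem_array)
#     df[df > 0] = np.nan      # boolean DataFrame mask    (_setitem_frame)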
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc[key] = value
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.iloc[indexer] = value
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
self.loc._ensure_listlike_indexer(key, axis=1, value=value)
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.iloc[:, indexer] = value
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _set_item_frame_value(self, key, value: DataFrame) -> None:
self._ensure_valid_index(value)
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = _reindex_for_setitem(value, self.index)
value = value.T
self._set_item_mgr(key, value)
def _iset_item_mgr(self, loc: int, value) -> None:
self._mgr.iset(loc, value)
self._clear_item_cache()
def _set_item_mgr(self, key, value):
value = _maybe_atleast_2d(value)
try:
loc = self._info_axis.get_loc(key)
except KeyError:
# This item wasn't present, just insert at end
self._mgr.insert(len(self._info_axis), key, value)
else:
self._iset_item_mgr(loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _iset_item(self, loc: int, value):
value = self._sanitize_column(value)
value = _maybe_atleast_2d(value)
self._iset_item_mgr(loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If the value is a numpy array (not a Series), it must be the same
length as the DataFrame's index or an error will be raised. A Series
will be conformed to the DataFrame's index to ensure homogeneity.
"""
value = self._sanitize_column(value)
if (
key in self.columns
and value.ndim == 1
and not is_extension_array_dtype(value)
):
# broadcast across multiple columns if necessary
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
self._set_item_mgr(key, value)
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : bool, default False
Interpret the index/col as indexers.
"""
try:
if takeable:
series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
series = self._get_item_cache(col)
engine = self.index._engine
loc = engine.get_loc(index)
validate_numeric_casting(series.dtype, value)
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value):
"""
Ensure that, if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
if not isinstance(value, DataFrame):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
if self.index.name is not None:
index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
return klass(values, index=self.index, name=name, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr: str, inplace: bool = False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks. Thus, column names containing spaces
or punctuations (besides underscores) or starting with digits must be
surrounded by backticks. (For example, a column named "Area (cm^2)" would
be referenced as `Area (cm^2)`). Column names which are Python keywords
(like "list", "for", "import", etc.) cannot be used.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame or None
DataFrame resulting from the provided query expression or
None if ``inplace=True``.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a valid Python identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
This excludes whitespace other than the space character, as well as
the hash character (which is used for comments) and the backtick
itself (the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
else:
return result
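# Example (illustrative sketch): environment variables can be referenced in a
# query with the '@' prefix described in the docstring:
#
#     threshold = 3
#     df.query('A > @threshold')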
def eval(self, expr: str, inplace: bool = False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, pandas object, or None
The result of the evaluation or None if ``inplace=True``.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
See Also
--------
DataFrame.dtypes: Return Series with the data type of each column.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int64'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
if (
issubclass(
unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]
)
or (
np.number in dtypes_set
and getattr(unique_dtype, "_is_numeric", False)
)
)
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
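# Example (illustrative sketch): the dtype aliases listed in the Notes section
# also work, e.g. selecting all numeric columns:
#
#     df.select_dtypes(include='number')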
def insert(self, loc, column, value, allow_duplicates: bool = False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
See Also
--------
Index.insert : Insert new item by index.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df
col1 col2
0 1 3
1 2 4
>>> df.insert(1, "newcol", [99, 99])
>>> df
col1 newcol col2
0 1 99 3
1 2 99 4
>>> df.insert(0, "col1", [100, 100], allow_duplicates=True)
>>> df
col1 col1 newcol col2
0 100 1 99 3
1 100 2 99 4
"""
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'allow_duplicates=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
value = self._sanitize_column(value)
value = _maybe_atleast_2d(value)
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
def assign(self, **kwargs) -> DataFrame:
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, value):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
value : scalar, Series, or array-like
Returns
-------
numpy.ndarray
"""
self._ensure_valid_index(value)
# We should never get here with DataFrame value
if isinstance(value, Series):
value = _reindex_for_setitem(value, self.index)
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
value = construct_1d_arraylike_from_scalar(value, len(self), dtype=None)
return value
@property
def _series(self):
return {
item: Series(
self._mgr.iget(idx), index=self.index, name=item, fastpath=True
)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
use DataFrame.melt and DataFrame.loc instead.
For an example see :meth:`~pandas.DataFrame.lookup`
in the user guide.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
"""
msg = (
"The 'lookup' method is deprecated and will be"
"removed in a future version."
"You can use DataFrame.melt and DataFrame.loc"
"as a substitute."
)
warnings.warn(msg, FutureWarning, stacklevel=2)
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
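# A simple substitute for the deprecated method, using ``DataFrame.at`` per pair
# (a sketch, assuming ``import pandas as pd``; the labels below are hypothetical):
# >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["r0", "r1"])
# >>> rows, cols = ["r0", "r1"], ["B", "A"]
# >>> [df.at[r, c] for r, c in zip(rows, cols)]  # same values as df.lookup(rows, cols)
# [3, 2]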
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy: bool,
level: Level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy: bool,
level: Level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame:
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join: str = "outer",
axis: Optional[Axis] = None,
level: Optional[Level] = None,
copy: bool = True,
fill_value=None,
method: Optional[str] = None,
limit=None,
fill_axis: Axis = 0,
broadcast_axis: Optional[Axis] = None,
) -> DataFrame:
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> DataFrame:
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
def drop(
self,
labels=None,
axis: Axis = 0,
index=None,
columns=None,
level: Optional[Level] = None,
inplace: bool = False,
errors: str = "raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or None
DataFrame without the removed index or column labels or
None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[DataFrame]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame or None
DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters:
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
limit=None,
downcast=None,
) -> Optional[DataFrame]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def pop(self, item: Hashable) -> Series:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace: bool = False,
limit=None,
regex: bool = False,
method: str = "pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: Dict[Hashable, Tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
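# For reference, a mapping of the shape described above is produced by
# column-specific replacements at the public API level; a small sketch
# (assuming ``import pandas as pd``):
# >>> df = pd.DataFrame({"A": [1, 2], "B": [1, 2]})
# >>> df.replace({"A": {1: 100}})  # only column "A" has its 1 replaced
#      A  B
# 0  100  1
# 1    2  2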
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self, periods=1, freq=None, axis: Axis = 0, fill_value=lib.no_default
) -> DataFrame:
axis = self._get_axis_number(axis)
ncols = len(self.columns)
if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:
# We will infer fill_value to match the closest column
# Use a column that we know is valid for our column's dtype GH#38434
label = self.columns[0]
if periods > 0:
result = self.iloc[:, :-periods]
for col in range(min(ncols, abs(periods))):
# TODO(EA2D): doing this in a loop unnecessary with 2D EAs
# Define filler inside loop so we get a copy
filler = self.iloc[:, 0].shift(len(self))
result.insert(0, label, filler, allow_duplicates=True)
else:
result = self.iloc[:, -periods:]
for col in range(min(ncols, abs(periods))):
# Define filler inside loop so we get a copy
filler = self.iloc[:, -1].shift(len(self))
result.insert(
len(result.columns), label, filler, allow_duplicates=True
)
result.columns = self.columns.copy()
return result
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
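# Usage sketch of the column-axis branch handled above (assuming
# ``import pandas as pd``):
# >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
# >>> df.shift(periods=1, axis=1)  # values move one column to the right
#     a  b
# 0 NaN  1
# 1 NaN  2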
def set_index(
self,
keys,
drop: bool = True,
append: bool = False,
inplace: bool = False,
verify_integrity: bool = False,
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
If True, modifies the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame or None
Changed row labels or None if ``inplace=True``.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Hashable] = []
for col in keys:
if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names: List[Hashable] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Hashable] = []
for col in keys:
if isinstance(col, MultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (Index, Series)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
@overload
# https://github.com/python/mypy/issues/6580
# Overloaded function signatures 1 and 2 overlap with incompatible return types
def reset_index( # type: ignore[misc]
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[False] = ...,
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> DataFrame:
...
@overload
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[True] = ...,
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> None:
...
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Hashable = "",
) -> Optional[DataFrame]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if inplace:
new_obj = self
else:
new_obj = self.copy()
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, MultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if level is not None and i not in level:
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = lev._values
if level_values.dtype == np.object_:
level_values = lib.maybe_convert_objects(level_values)
if lab is not None:
# if we have the codes, extract the values with a mask
level_values = algorithms.take(
level_values, lab, allow_fill=True, fill_value=lev._na_value
)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> DataFrame:
return self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> DataFrame:
return ~self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> DataFrame:
return ~self.isna()
def dropna(
self,
axis: Axis = 0,
how: str = "any",
thresh=None,
subset=None,
inplace: bool = False,
):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. versionchanged:: 1.0.0
Passing a tuple or list of axes is no longer supported;
only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine whether a row or column is removed from the DataFrame
when it has at least one NA or all NA values.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame or None
DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional[DataFrame]:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider a dataset containing ramen ratings.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
duplicated = self.duplicated(subset, keep=keep)
result = self[-duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean series indicating whether each row is a duplicate.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider a dataset containing ramen ratings.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64
if self.empty:
return self._constructor_sliced(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
by,
axis: Axis = 0,
ascending=True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
keys = [Series(k, name=name) for (k, name) in zip(keys, by)]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
else:
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
k = Series(k, name=by)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.set_axis(1, ibase.default_index(len(indexer)))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
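# Usage sketch of the ``key`` handling above, which wraps each sort column in a
# Series so the key can use Series methods (assuming ``import pandas as pd``):
# >>> df = pd.DataFrame({"name": ["b", "A", "c"]})
# >>> df.sort_values("name", key=lambda s: s.str.lower())
#   name
# 1    A
# 0    b
# 2    c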
def sort_index(
self,
axis: Axis = 0,
level: Optional[Level] = None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` and `stable` are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order; to sort in descending order,
use ``ascending=False``
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def value_counts(
self,
subset: Optional[Sequence[Hashable]] = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
2 2 0.25
6 0 0.25
dtype: float64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
def nlargest(self, n, columns, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
"""
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
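# Usage sketch (assuming ``import pandas as pd``; the level names are hypothetical):
# >>> midx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["outer", "inner"])
# >>> df = pd.DataFrame({"x": [10, 20]}, index=midx)
# >>> df.swaplevel("outer", "inner").index.names
# FrozenList(['inner', 'outer'])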
def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
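# Usage sketch (assuming ``import pandas as pd``; the level names are hypothetical):
# >>> midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["letter", "number"])
# >>> df = pd.DataFrame({"x": range(4)}, index=midx)
# >>> df.reorder_levels(["number", "letter"]).index.names
# FrozenList(['number', 'letter'])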
# ----------------------------------------------------------------------
# Arithmetic Methods
def _cmp_method(self, other, op):
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
def _arith_method(self, other, op):
if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):
return ops.frame_arith_method_with_reindex(self, other, op)
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
_logical_method = _arith_method
def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
Parameters
----------
right : scalar, Series, or DataFrame
func : arithmetic or comparison operator
axis : {None, 0, 1}
Returns
-------
DataFrame
"""
# Get the appropriate array-op to apply to each column/block's values.
array_op = ops.get_array_op(func)
right = lib.item_from_zerodim(right)
if not is_list_like(right):
# i.e. scalar, faster than checking np.ndim(right) == 0
bm = self._mgr.apply(array_op, right=right)
return type(self)(bm)
elif isinstance(right, DataFrame):
assert self.index.equals(right.index)
assert self.columns.equals(right.columns)
# TODO: The previous assertion `assert right._indexed_same(self)`
# fails in cases with empty columns reached via
# _frame_arith_method_with_reindex
# TODO operate_blockwise expects a manager of the same type
bm = self._mgr.operate_blockwise(
right._mgr, array_op # type: ignore[arg-type]
)
return type(self)(bm)
elif isinstance(right, Series) and axis == 1:
# axis=1 means we want to operate row-by-row
assert right.index.equals(self.columns)
right = right._values
# maybe_align_as_frame ensures we do not have an ndarray here
assert not isinstance(right, np.ndarray)
arrays = [
array_op(_left, _right)
for _left, _right in zip(self._iter_column_arrays(), right)
]
elif isinstance(right, Series):
assert right.index.equals(self.index) # Handle other cases later
right = right._values
arrays = [array_op(left, right) for left in self._iter_column_arrays()]
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
return type(self)._from_arrays(
arrays, self.columns, self.index, verify_integrity=False
)
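# For reference, the Series/axis=1 branch above is what column-aligned
# broadcasting looks like at the public API level; a sketch (assuming
# ``import pandas as pd``):
# >>> df = pd.DataFrame({"A": [1, 2], "B": [10, 20]})
# >>> df + pd.Series({"A": 100, "B": 1000})  # Series index aligned with df.columns
#      A     B
# 0  101  1010
# 1  102  1020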
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = self._dispatch_frame_op(other, _arith_op)
return new_data
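# ``fill_value`` here is what the flex arithmetic ops pass through; a small
# sketch (assuming ``import pandas as pd`` and ``import numpy as np``):
# >>> df1 = pd.DataFrame({"A": [1.0, np.nan]})
# >>> df2 = pd.DataFrame({"A": [10.0, 20.0]})
# >>> df1.add(df2, fill_value=0)  # the side that is NaN is treated as 0
#       A
# 0  11.0
# 1  20.0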
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = self // other
mod = self - div * other
return div, mod
def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = other // self
mod = other - div * self
return div, mod
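# Usage sketch (assuming ``import pandas as pd``):
# >>> df = pd.DataFrame({"a": [7, 9]})
# >>> div, mod = divmod(df, 4)
# >>> div["a"].tolist(), mod["a"].tolist()
# ([1, 2], [3, 1])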
# ----------------------------------------------------------------------
# Combination-Related
@doc(
_shared_docs["compare"],
"""
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
Raises
------
ValueError
When the two DataFrames don't have identical labels or shape.
See Also
--------
Series.compare : Compare with another Series and show differences.
DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
Can only compare identically-labeled
(i.e. same shape, identical row and column labels) DataFrames.
Examples
--------
>>> df = pd.DataFrame(
... {{
... "col1": ["a", "a", "b", "b", "a"],
... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
... }},
... columns=["col1", "col2", "col3"],
... )
>>> df
col1 col2 col3
0 a 1.0 1.0
1 a 2.0 2.0
2 b 3.0 3.0
3 b NaN 4.0
4 a 5.0 5.0
>>> df2 = df.copy()
>>> df2.loc[0, 'col1'] = 'c'
>>> df2.loc[2, 'col3'] = 4.0
>>> df2
col1 col2 col3
0 c 1.0 1.0
1 a 2.0 2.0
2 b 3.0 4.0
3 b NaN 4.0
4 a 5.0 5.0
Align the differences on columns
>>> df.compare(df2)
col1 col3
self other self other
0 a c NaN NaN
2 NaN NaN 3.0 4.0
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
col1 col3
0 self a NaN
other c NaN
2 self NaN 3.0
other NaN 4.0
Keep the equal values
>>> df.compare(df2, keep_equal=True)
col1 col3
self other self other
0 a c 1.0 1.0
2 b b 3.0 4.0
Keep all original rows and columns
>>> df.compare(df2, keep_shape=True)
col1 col2 col3
self other self other self other
0 a c NaN NaN NaN NaN
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN 3.0 4.0
3 NaN NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN NaN
Keep all original rows and columns and also all original values
>>> df.compare(df2, keep_shape=True, keep_equal=True)
col1 col2 col3
self other self other self other
0 a c 1.0 1.0 1.0 1.0
1 a a 2.0 2.0 2.0 2.0
2 b b 3.0 3.0 3.0 4.0
3 b b NaN NaN 4.0 4.0
4 a a 5.0 5.0 5.0 5.0
""",
klass=_shared_doc_kwargs["klass"],
)
def compare(
self,
other: DataFrame,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(
self, other: DataFrame, func, fill_value=None, overwrite: bool = True
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two Series as inputs and returns a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axes differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed-in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
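# Combine one column at a time: optionally fill NaNs with `fill_value`,
# promote mismatched dtypes to a common type, apply `func`, and downcast
# the combined values where possible.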
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If `col` is not a column of self, the aligned series is all NaN;
# try to promote it to the dtype it has in `other`.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: DataFrame) -> DataFrame:
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
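# `combiner` keeps self's values and fills its nulls from `other`; columns
# that exist only in `other` are taken wholesale. Dtypes of shared columns
# are re-harmonized after the combine below.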
def combiner(x, y):
mask = extract_array(isna(x))
x_values = extract_array(x, extract_numpy=True)
y_values = extract_array(y, extract_numpy=True)
# If column `y` from `other` is not present in self,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
combined = self.combine(other, combiner, overwrite=False)
dtypes = {
col: find_common_type([self.dtypes[col], other.dtypes[col]])
for col in self.columns.intersection(other.columns)
if not is_dtype_equal(combined.dtypes[col], self.dtypes[col])
}
if dtypes:
combined = combined.astype(dtypes)
return combined
def update(
self,
other,
join: str = "left",
overwrite: bool = True,
filter_func=None,
errors: str = "ignore",
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
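# Work column by column: `mask` marks the positions whose current values
# are kept, everything else is overwritten with the aligned value from
# `other` via expressions.where.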
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
We can also choose to include NA in the group keys or not by setting the
`dropna` parameter; the default setting is `True`:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum()
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum()
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by="a").sum()
b c
a
a 13.0 13.0
b 12.3 123.0
>>> df.groupby(by="a", dropna=False).sum()
b c
a
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis: Axis = 0,
level: Optional[Level] = None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> DataFrameGroupBy:
from pandas.core.groupby.generic import DataFrameGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
dropna=dropna,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If a list of functions is passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves).
If a dict is passed, the key is the column to aggregate and the value
is the function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all rows / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level: Level = -1, dropna: bool = True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
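# A list/tuple of levels is stacked one after another via stack_multiple;
# a single level goes through the plain stack routine.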
if isinstance(level, (tuple, list)):
result = stack_multiple(self, level, dropna=dropna)
else:
result = stack(self, level, dropna=dropna)
return result.__finalize__(self, method="stack")
def explode(
self, column: Union[str, Tuple], ignore_index: bool = False
) -> DataFrame:
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a Series of list-like values to rows.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of rows in the
output will be non-deterministic when exploding sets.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
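# Explode the target column as a Series, join it back onto the remaining
# columns, then restore the original index (repeated for exploded rows)
# unless ignore_index was requested.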
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
if ignore_index:
result.index = ibase.default_index(len(result))
else:
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
result = unstack(self, level, fill_value)
return result.__finalize__(self, method="unstack")
@Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"})
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level: Optional[Level] = None,
ignore_index=True,
) -> DataFrame:
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
# ----------------------------------------------------------------------
# Time series-related
@doc(
Series.diff,
klass="DataFrame",
extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n "
"Take difference over rows (0) or columns (1).\n",
other_klass="Series",
examples=dedent(
"""
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0 0
1 NaN -1 3
2 NaN -1 7
3 NaN -1 13
4 NaN 0 20
5 NaN 2 28
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
Overflow in input dtype
>>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
>>> df.diff()
a
0 NaN
1 255.0"""
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
if not isinstance(periods, int):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
bm_axis = self._get_block_manager_axis(axis)
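# Column-wise differences (axis=1) are computed as a shifted subtraction;
# row-wise differences use the block manager's optimized diff.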
if bm_axis == 0 and periods != 0:
return self - self.shift(periods, axis=axis)
new_data = self._mgr.diff(n=periods, axis=bm_axis)
return self._constructor(new_data).__finalize__(self, "diff")
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: IndexLabel,
ndim: int,
subset: Optional[FrameOrSeriesUnion] = None,
) -> FrameOrSeriesUnion:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
sum 12.0 NaN
min 1.0 2.0
max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
axis = self._get_axis_number(axis)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
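# reconstruct_func normalizes named-aggregation keywords
# (e.g. df.agg(x=('A', 'max'))) into a plain func plus the output labels
# and order used to relabel the result below.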
result = None
try:
result, how = self._aggregate(func, axis, *args, **kwargs)
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
if relabeling:
# This is to keep the order to columns occurrence unchanged, and also
# keep the order of new columns occurrence unchanged
# For the return values of reconstruct_func, if relabeling is
# False, columns and order will be None.
assert columns is not None
assert order is not None
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
return result
def _aggregate(self, arg, axis: Axis = 0, *args, **kwargs):
from pandas.core.apply import frame_apply
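# Aggregation along axis=1 is implemented by transposing, aggregating
# along axis=0, and transposing the result back afterwards.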
op = frame_apply(
self if axis == 0 else self.T,
func=arg,
axis=0,
args=args,
kwds=kwargs,
)
result, how = op.agg()
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result = result.T if result is not None else result
return result, how
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
result = transform(self, func, axis, *args, **kwargs)
assert isinstance(result, DataFrame)
return result
def apply(
self,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type=None,
args=(),
**kwds,
):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However, if the apply function returns a Series, these
are expanded to columns.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keyword arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.apply()
def applymap(
self, func: PythonFuncType, na_action: Optional[str] = None
) -> DataFrame:
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
If ‘ignore’, propagate NaN values, without passing them to func.
.. versionadded:: 1.2
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Like Series.map, NA values can be ignored:
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
>>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
0 1
0 <NA> 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
ignore_na = na_action == "ignore"
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func, ignore_na=ignore_na)
return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)
return self.apply(infer).__finalize__(self, "applymap")
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self,
other,
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> DataFrame:
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following examples, while not recommended ways of generating DataFrames,
show two approaches to building a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
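# A Series or dict is expanded into a one-row DataFrame aligned to the
# union of columns; the actual appending is then delegated to concat below.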
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return (
concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
).__finalize__(self, method="append")
def join(
self,
other: FrameOrSeriesUnion,
on: Optional[IndexLabel] = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling frame's index.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self,
other: FrameOrSeriesUnion,
on: Optional[IndexLabel] = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
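# A Series is wrapped in a single-column DataFrame. Joining a single
# DataFrame dispatches to merge (index-on-index, or `on` against other's
# index); a list of frames is combined with concat when all indexes are
# unique, falling back to successive merges otherwise.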
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if how == "cross":
return merge(
self,
other,
how=how,
on=on,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right: FrameOrSeriesUnion,
how: str = "inner",
on: Optional[IndexLabel] = None,
left_on: Optional[IndexLabel] = None,
right_on: Optional[IndexLabel] = None,
left_index: bool = False,
right_index: bool = False,
sort: bool = False,
suffixes: Suffixes = ("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate: Optional[str] = None,
) -> DataFrame:
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
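# The helpers below round numeric columns only; non-numeric columns and
# columns not named in a dict/Series of decimals pass through unchanged.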
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
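# Pearson/Spearman/Kendall correlations use the cython nancorr routines;
# a callable correlation falls back to a python loop over column pairs,
# filling a symmetric matrix and assigning NaN to pairs with fewer than
# min_periods valid observations.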
if method == "pearson":
correl = libalgos.nancorr(mat, minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(mat, minp=min_periods)
elif method == "kendall":
correl = libalgos.nancorr_kendall(mat, minp=min_periods)
elif callable(method):
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(
self, min_periods: Optional[int] = None, ddof: Optional[int] = 1
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimated correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__
for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
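# With no missing values, delegate to np.cov on the transposed matrix
# (all-NaN if min_periods exceeds the number of rows); otherwise compute
# a pairwise covariance with the nancorr routine using cov=True.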
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
def corrwith(self, other, axis: Axis = 0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
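# Pearson is computed vectorized from demeaned products; adding 0 * the
# other frame first makes missing values line up in both operands.
# Kendall/Spearman/callable methods fall back to nanops.nancorr per
# column pair.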
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = self._constructor_sliced(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(
self, axis: Axis = 0, level: Optional[Level] = None, numeric_only: bool = False
):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64")
def _count_level(self, level: Level, axis: Axis = 0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = self._constructor(counts, index=agg_axis, columns=level_index)
else:
result = self._constructor(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self,
op,
name: str,
*,
axis: Axis = 0,
skipna: bool = True,
numeric_only: Optional[bool] = None,
filter_type=None,
**kwds,
):
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]
dtype_is_dt = np.array(
[is_datetime64_any_dtype(dtype) for dtype in own_dtypes],
dtype=bool,
)
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
stacklevel=5,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
assert axis in [0, 1]
def func(values: np.ndarray):
# We only use this in the case that operates on self.values
return op(values, axis=axis, skipna=skipna, **kwds)
def blk_func(values, axis=1):
if isinstance(values, ExtensionArray):
return values._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=axis, skipna=skipna, **kwds)
def _get_data() -> DataFrame:
if filter_type is None:
data = self._get_numeric_data()
else:
# GH#25101, GH#24434
assert filter_type == "bool"
data = self._get_bool_data()
return data
if numeric_only is not None or axis == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
# For numeric_only=None, only the case with axis==0 and no object
# dtypes is unambiguous and can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
if numeric_only is True:
df = _get_data()
if axis == 1:
df = df.T
axis = 0
ignore_failures = numeric_only is None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and len(self) == 0 and name in ["sum", "prod"]:
# Even if we are object dtype, follow numpy and return
# float64, see test_apply_funcs_over_empty
out = out.astype(np.float64)
return out
assert numeric_only is None
data = self
values = data.values
try:
result = func(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
data = _get_data()
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = func(values)
if filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
elif filter_type is None and is_object_dtype(result.dtype):
try:
result = result.astype(np.float64)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
pass
result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax : Return index of the maximum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the maximum value in each column.
>>> df.idxmax()
consumption Wheat Products
co2_emissions Beef
dtype: object
To return the index for the maximum value in each row, use ``axis="columns"``.
>>> df.idxmax(axis="columns")
Pork co2_emissions
Wheat Products consumption
Beef co2_emissions
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(
self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True
) -> DataFrame:
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the modes of wings
are both 0 and 2. Because the resulting DataFrame has two rows,
the second row of ``species`` and ``legs`` contains ``NaN``.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
Setting ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
data = data.apply(f, axis=axis)
# Ensure index is type stable (should always use int index)
if data.empty:
data.index = ibase.default_index(0)
return data
def quantile(
self,
q=0.5,
axis: Axis = 0,
numeric_only: bool = True,
interpolation: str = "linear",
):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
result = data._mgr.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
@doc(NDFrame.asfreq, **_shared_doc_kwargs)
def asfreq(
self,
freq,
method=None,
how: Optional[str] = None,
normalize: bool = False,
fill_value=None,
) -> DataFrame:
return super().asfreq(
freq=freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
@doc(NDFrame.resample, **_shared_doc_kwargs)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: Optional[int] = None,
on=None,
level=None,
origin: Union[str, TimestampConvertibleTypes] = "start_day",
offset: Optional[TimedeltaConvertibleTypes] = None,
) -> Resampler:
return super().resample(
rule=rule,
axis=axis,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
base=base,
on=on,
level=level,
origin=origin,
offset=offset,
)
def to_timestamp(
self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True
) -> DataFrame:
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
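Examples
--------
A minimal sketch assuming a monthly ``PeriodIndex``:
>>> idx = pd.period_range("2020-01", periods=3, freq="M")
>>> df = pd.DataFrame({"x": [1, 2, 3]}, index=idx)
>>> dft = df.to_timestamp()  # the index becomes a DatetimeIndex at the start of each month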
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, PeriodIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
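Examples
--------
A minimal sketch assuming a daily ``DatetimeIndex``:
>>> idx = pd.date_range("2020-01-01", periods=3, freq="D")
>>> df = pd.DataFrame({"x": [1, 2, 3]}, index=idx)
>>> dfp = df.to_period("D")  # the index becomes a PeriodIndex with daily frequency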
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, DatetimeIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
def isin(self, values) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
return self._constructor(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {
**NDFrame._AXIS_TO_AXIS_NUMBER,
1: 1,
"columns": 1,
}
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: Index = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> Dict[str, int]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NUMBERS
return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> Dict[int, str]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NAMES
return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._add_numeric_operations()
ops.add_flex_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
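# Illustrative behaviour: _from_nested_dict({"r1": {"c1": 1}, "r2": {"c1": 2}})
# returns defaultdict(dict, {"c1": {"r1": 1, "r2": 2}}), i.e. the outer keys of
# the input become the inner (index) keys of each column dict.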
def _reindex_for_setitem(value: FrameOrSeriesUnion, index: Index) -> ArrayLike:
# reindex if necessary
if value.index.equals(index) or not len(index):
return value._values.copy()
# GH#4107
try:
reindexed_value = value.reindex(index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return reindexed_value
def _maybe_atleast_2d(value):
# TODO(EA2D): not needed with 2D EAs
if is_extension_array_dtype(value):
return value
return np.atleast_2d(np.asarray(value))
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older versions of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
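# For example, _parse_version('1.8.1') gives (1, 8, 1); a development build
# such as '1.9.0.dev-1ea1592' keeps the non-numeric component as a string.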
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
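# Either implementation saturates smoothly, e.g. expit(0.0) == 0.5 while
# expit(-1000.) and expit(1000.) evaluate to 0. and 1. without overflowing,
# which is the point of the tanh formulation above.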
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
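# Either branch exposes the same interface, e.g. (illustrative, X sparse):
# mins, maxs = sparse_min_max(X, axis=0)  # per-column minima/maxima as 1d arrays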
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
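# Either way in1d returns a boolean mask, e.g. (illustrative):
# in1d([1, 2, 3], [2, 4]) -> array([False, True, False]), flipped if invert=True.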
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
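# With the backport (or numpy >= 1.6.2), e.g. (illustrative):
# bincount(np.array([], dtype=np.intp), minlength=3) -> array([0, 0, 0]).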
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/contour_label_demo.py | 3 | 2238 | #!/usr/bin/env python
"""
Illustrate some of the more advanced things that one can do with
contour labels.
See also contour_demo.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
##################################################
# Define our surface
##################################################
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
##################################################
# Make contour labels using creative float classes
# Follows suggestion of Manuel Metz
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
# Define a class that forces representation of float to look a certain way
# This removes the trailing zero so '1.0' becomes '1'
class nf(float):
def __repr__(self):
str = '%.1f' % (self.__float__(),)
if str[-1]=='0':
return '%.0f' % self.__float__()
else:
return '%.1f' % self.__float__()
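# For instance, repr(nf(2.0)) gives '2' while repr(nf(2.5)) gives '2.5', so
# integer-valued contour levels are labelled without a trailing '.0'.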
# Recast levels to new class
CS.levels = [nf(val) for val in CS.levels ]
# Label levels with specially formatted floats
plt.clabel(CS, CS.levels, inline=True, fmt='%r %%', fontsize=10)
##################################################
# Label contours with arbitrary strings using a
# dictionary
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
fmt = {}
strs = [ 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh' ]
for l,s in zip( CS.levels, strs ):
fmt[l] = s
# Label every other level using strings
plt.clabel(CS,CS.levels[::2],inline=True,fmt=fmt,fontsize=10)
# Use a Formatter
plt.figure()
CS = plt.contour(X, Y, 100**Z, locator=plt.LogLocator())
fmt = ticker.LogFormatterMathtext()
fmt.create_dummy_axis()
plt.clabel(CS, CS.levels, fmt=fmt)
plt.title("$100^Z$")
plt.show()
| gpl-2.0 |
zuku1985/scikit-learn | sklearn/linear_model/bayes.py | 14 | 19671 | """
Various Bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
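# These are the usual evidence-maximisation updates: gamma_ is the effective
# number of well-determined parameters, lambda_ ~ gamma_ / ||coef_||^2 and
# alpha_ ~ (n_samples - gamma_) / rmse_, each damped by the Gamma hyperprior
# terms (lambda_1, lambda_2, alpha_1, alpha_2).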
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_ard.py for an example.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
ARD is a little different than the slide: only dimensions/features for
which self.lambda_ < self.threshold_lambda are kept and the rest are
discarded.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
| bsd-3-clause |
hande-qmc/hande | tools/reblock_hande.py | 1 | 14236 | #!/usr/bin/env python
'''Run a reblocking analysis on HANDE QMC output files. Files may be compressed
with either gzip, bzip2 or xz (python 3 only). CCMC and FCIQMC calculations
only are supported; other calculations have specific analysis scripts and/or
should be analysed directly with pyhande.'''
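# Example usage (illustrative file names):
#   reblock_hande.py -s 10000 hande.fciqmc.out
#   reblock_hande.py --merge calc.out.1 calc.out.2 -- other_calc.out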
import argparse
import os
import pkgutil
import pprint
import sys
import pandas as pd
_script_dir = os.path.abspath(os.path.dirname(__file__))
if not pkgutil.find_loader('pyblock'):
sys.path.append(os.path.join(_script_dir, 'pyblock'))
if not pkgutil.find_loader('pyhande'):
sys.path.append(os.path.join(_script_dir, 'pyhande'))
import pyblock
import pyhande
def run_hande_blocking(files, start_iteration=None, end_iteration=None,
reblock_plot=None, verbose=1, width=0,
out_method='to_string', inefficiency=False,
reweight_plot=False, extract_rl_time=False,
analysis_method=None, warmup_detection=None):
'''Run a reblocking analysis on HANDE output and print to STDOUT.
See :func:`pyblock.pd_utils.reblock` and :func:`pyblock.blocking.reblock` for
details on the reblocking procedure.
Parameters
----------
files : list of list of strings
names of files containing HANDE QMC calculation output. Each list contains
the a set of files which are analysed together (ie a series of calculations
restarted from the previous calculation).
start_iteration : int or None (Default)
QMC iteration from which statistics are gathered. While the end_iteration
is included in analysis, the start_iteration is not.
end_iteration : int or None (Default)
QMC iteration until which statistics are gathered. If None, the last QMC
iteration included is the last iteration of the data set.
reblock_plot : string
Filename to which the reblocking convergence plot (standard error vs reblock
iteration) is saved. The plot is not created if None and shown
interactively if '-'.
verbose : int
Level of verbosity.
<0: print nothing
0: print only the estimate from the optimal block length.
1: print only the recommended statistics from the optimal block length.
2: print search for automatic starting iteration (if required), blocking
analysis and recommended statistics.
3: print calculation metadata, search for automatic starting iteration
(if required), blocking analysis and recommended statistics.
width : int
Maximum width (in characters) of lines to print out for
:class:`pandas.DataFrame` objects; exceeding this results in line wrapping.
A non-positive value corresponds to disabling line wrapping.
out_method : string
Output method for printing out tables. Either 'to_string' to print a
space-separate table or 'to_csv' to print a CSV table.
inefficiency : bool
Attempt to calculate the inefficiency factor for the calculations, and
include it in the output.
reweight_plot : bool
Reweight the projected energy and show a plot to determine population bias.
extract_rl_time : bool
Extract the times taken for each report loop and find their mean and error.
Returns
-------
info :
Output from :func:`pyhande.lazy.std_analysis`.
opt_block: :class:`pandas.DataFrame`
Recommended statistics based upon the estimated 'optimal' block size
as suggested by Wolff and Lee et al. (see
:func:`pyblock.blocking.find_optimal_block`).
'''
try:
float_str = '%-#.8e'
float_fmt = '{0:-#.8e}'.format
float_fmt(1.0)
except ValueError:
# GAH. Alternate formatting only added to format function after
# python 2.6..
float_str = '%-.8e'
float_fmt = '{0:-.8e}'.format
def df_to_x(df, out_method, float_fmt, float_str, width):
tbl_fn = getattr(df, out_method)
try:
return tbl_fn(float_format=float_fmt, na_rep='n/a',
line_width=width)
except TypeError:
# Annoyingly to_csv and to_string take different types for
# their float_fmt arguments.
# See: https://github.com/pydata/pandas/issues/9448
return tbl_fn(float_format=float_str, na_rep='n/a',
line_width=width)
if width <= 0:
width = None
# verbosity levels
v_silent = -1
(v_estimate, v_rec_stats, v_analysis, v_meta, v_input) = (0, 1, 2, 3, 4)
infos = []
indices = []
for calc in files:
try:
info = pyhande.lazy.std_analysis(calc, start_iteration,
end=end_iteration,
extract_psips=True,
calc_inefficiency=inefficiency,
verbosity = verbose,
extract_rep_loop_time=extract_rl_time,
analysis_method=analysis_method,
warmup_detection=warmup_detection)
for (i, i_info) in enumerate(info):
if verbose >= v_analysis:
msg = 'Analysing file(s): %s.' % (' '.join(calc))
if len(info) > 1:
msg += '\nCalculation: %i.' % (i,)
msg += ('\nReblocking from iteration: %i.' %
(i_info.metadata['pyhande']['reblock_start'],))
print(msg)
if verbose >= v_meta:
md = i_info.metadata
calc_type = md.pop('calc_type')
calc_input = md.pop('input')
print('calc_type: %s.\n' % (calc_type))
pprint.pprint(md)
if verbose >= v_input:
print('\nFull input options:\n%s' % '\n'.join(calc_input))
print('')
if verbose >= v_analysis:
print(df_to_x(i_info.reblock, out_method, float_fmt, float_str,
width))
print('')
infos.extend(info)
if len(info) == 1:
indices.append(','.join(calc))
else:
indices.extend((','.join(calc),i) for i in range(len(info)))
if reweight_plot:
pyhande.lazy.reweighting_graph(calc, start=start_iteration,
verbosity=verbose)
except ValueError:
print('WARNING: No data found in file '+' '.join(calc)+'.')
except RuntimeError as err:
print('WARNING: Reblocking failed for file '+' '.join(calc)+' with error '+format(err)+'.')
opt_blocks = [pd.DataFrame(data=
{'iteration':info.metadata['pyhande']['reblock_start']},
index = ['Block from']).append(info.opt_block) for info in infos]
if verbose < v_rec_stats:
for opt_block in opt_blocks:
if not opt_block.empty:
levels = ['mean', 'standard error', 'standard error error']
for level in levels:
opt_block.drop(level, axis=1, inplace=True)
opt_blocks = [opt_block.stack() for opt_block in opt_blocks]
opt_block = pd.DataFrame(dict(zip(indices, opt_blocks))).T
if verbose < v_rec_stats and not opt_block.empty:
opt_block.columns = opt_block.columns.droplevel(1)
if not opt_block.empty and verbose > v_silent:
print('Recommended statistics from optimal block size:')
print('')
print(df_to_x(opt_block, out_method, float_fmt, float_str, width))
for (calc, info) in zip(indices, infos):
if info.no_opt_block and verbose > v_silent:
fnames = ''
if (len(indices) > 1):
try:
fnames = ' in ' + calc.replace(',',' ')
except AttributeError:
# if there is more than one calculation in the file calc is a tuple
fnames = ' in ' + calc[0] + ' ' + str(calc[1])
if (analysis_method != 'hybrid'):
print('WARNING: could not find optimal block size%s.' % (fnames))
print('Insufficient statistics collected for the following '
'variables: %s.' % (', '.join(info.no_opt_block)))
if reblock_plot:
for info in infos:
pyblock.plot.plot_reblocking(info.reblock, reblock_plot)
return infos
def parse_args(args):
'''Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
(filenames, start_iteration, reblock_plot)
where
filenames : list of strings
list of QMC output files
start_iteration : int
iteration number from which statistics should be gathered.
reblock_plot : string
filename for the reblock convergence plot output.
'''
try:
cols = pd.core.series.get_terminal_size()[0]
except AttributeError:
try:
cols = pd.util.terminal.get_terminal_size()[0]
except AttributeError:
# terminal module moved in pandas 0.20
cols = pd.io.formats.terminal.get_terminal_size()[0]
if not sys.stdout.isatty():
cols = -1
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('-m', '--merge', default=False, action='store_true',
help='Combine data from each file before analysing. '
'Separate calculations can be denoted by placing \'--\''
' between groups of files. Default: treat each file as'
' an independent calculation.')
parser.add_argument('-o', '--output', default='txt', choices=['txt', 'csv'],
help='Format for data table. Default: %(default)s.')
parser.add_argument('-p', '--plot', default=None, dest='plotfile',
help='Filename to which the reblocking convergence plot '
'is saved. Use \'-\' to show plot interactively. '
'Default: off.')
parser.add_argument('-r', '--reweight', default=False, dest='reweight_plot',
                        action='store_true', help='For each independently '
                        'passed calculation, show a reweighting plot.')
parser.add_argument('-q', '--quiet', dest='verbose', action='store_const',
const=0, default=1,
help='Output only the final summary table. '
'Overrides --verbose.')
parser.add_argument('-s', '--start', type=int, dest='start_iteration',
default=None, help='Iteration number from which to '
'gather statistics. The start iteration itself is not '
'included in the analysis. Default: Try finding '
'starting iteration automatically. ')
parser.add_argument('-e', '--end', type=int, dest='end_iteration',
default=None, help='Iteration number until which to '
'gather statistics. Default: Last iteration in data '
'set. ')
parser.add_argument('-v', '--verbose', dest='verbose', action='count',
default=1, help='Increase verbosity of the output. Can '
'be specified multiple times.')
parser.add_argument('-w', '--width', type=int, default=cols,
help='Width (in characters) of data to print out '
'before wrapping them. A non-positive value disables '
'wrapping. Default: current terminal width if printing '
'to a terminal, -1 if redirecting.')
parser.add_argument('-i','--inefficiency', default=False, action='store_true',
help='Calculate the inefficiency factor for the calculation '
'if possible.')
parser.add_argument('-t','--extract_rl_time', default=False, action='store_true',
help='Find the mean time taken for a report loop.')
parser.add_argument('-a','--analysis_method', dest='analysis_method',
default='reblocking', choices=['reblocking','hybrid'],
help='Designate the post-analysis method '
                        'to estimate the statistical error. Default: %(default)s')
parser.add_argument('-b','--warmup_detection', dest='warmup_detection',
default='hande_org', choices=['hande_org','mser_min'],
help='Designate the method to determine '
'the starting iterations to be discarded before calculating '
                        'the statistical error. Default: %(default)s')
parser.add_argument('filenames', nargs=argparse.REMAINDER,
help='Space-separated list of files to analyse.')
options = parser.parse_args(args)
if not options.filenames:
parser.print_help()
sys.exit(1)
if options.merge:
merged = [[]]
for fname in options.filenames:
if fname == '--':
if merged[-1]:
merged.append([])
else:
merged[-1].append(fname)
options.filenames = merged
else:
options.filenames = [[fname] for fname in options.filenames]
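    # e.g. (hypothetical filenames) '-m a.out -- b.out c.out' gives
    # [['a.out'], ['b.out', 'c.out']], while plain 'a.out b.out' gives
    # [['a.out'], ['b.out']].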
out_methods = {'txt': 'to_string', 'csv': 'to_csv'}
options.output = out_methods[options.output]
return options
def main(args):
'''Run reblocking and data analysis on HANDE output.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
None.
'''
options = parse_args(args)
run_hande_blocking(options.filenames, options.start_iteration,
options.end_iteration, options.plotfile,
options.verbose, options.width, options.output,
options.inefficiency, options.reweight_plot,
options.extract_rl_time,
options.analysis_method, options.warmup_detection)
if __name__ == '__main__':
main(sys.argv[1:])
| lgpl-2.1 |
waterponey/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 81 | 5461 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired; therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix than the projection operator used here.
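For reference (not part of the original example text), the objective that
:class:`sklearn.linear_model.Lasso` minimizes, written in scikit-learn's
documented notation, is::
    (1 / (2 * n_samples)) * ||y - X w||^2_2 + alpha * ||w||_1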
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
    # Spread each (rotated) pixel-centre coordinate over its two neighbouring
    # detector bins, with linear-interpolation weights (1 - alpha) and alpha.
    x = np.ravel(x)
    floor_x = np.floor((x - orig) / dx)
    alpha = (x - orig - floor_x * dx) / dx
    return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
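# For example (hypothetical sizes), build_projection_operator(8, 4) returns a
# sparse matrix of shape (4 * 8, 8 ** 2) == (32, 64): one row per
# (angle, detector-bin) pair and one column per image pixel.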
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
    n_pts = 36  # use an int: this is passed as an array dimension below
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1  # np.int is removed in recent NumPy; use the builtin
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l // 7)  # the number of angles must be an int
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
Twangist/log_calls | tests/test_log_calls.py | 1 | 82876 | __author__ = "Brian O'Neill"
__version__ = '0.3.0'
from log_calls import log_calls
import doctest
import unittest
#############################################################################
# doctests
#############################################################################
def main_basic():
"""
##[Basic usage](id:Basic-usage)
`log_calls` has many features, and thus many, mostly independent, keyword parameters
(14 in all). This section introduces all but four of them, one at a time,
though of course you can use multiple parameters in any call to the decorator::
* [`enabled`](#enabled-parameter)
* [`args_sep`](#args_sep-parameter)
* [`log_args`](#log_args-parameter)
* [`log_retval`](#log_retval-parameter)
* [`log_exit`](#log_exit-parameter)
* [`log_call_numbers`](#log_call_numbers-parameter)
* [`log_elapsed`](#log_elapsed-parameter)
* [`indent`](#indent-parameter)
* [`prefix`](#prefix-parameter)
* [`file`](#file-parameter)
The two parameters that let you output `log_calls` messages to a `Logger`
([`logger`](#logger-parameter) and [`loglevel`](#loglevel-parameter))
are discussed in [Using loggers](#Logging). The two that determine whether
call history is retained ([record_history](#record_history-parameter)),
and then just how much([max_history](#max_history-parameter)), are discussed
in [Call history and statistics – the *stats* attribute and the *\*_history* parameters](#call-history-and-statistics).
Every example in this document uses `log_calls`, so without further ado:
>>> from log_calls import log_calls
###[Using no parameters](id:No-parameters)
First, let's see the simplest possible examples, using no parameters at all:
>>> @log_calls()
... def f(a, b, c):
... pass
>>> f(1, 2, 3)
f <== called by <module>
arguments: a=1, b=2, c=3
f ==> returning to <module>
Adding another decorated function to the call chain gives useful information too:
>>> @log_calls()
... def g(a):
... f(a, 2*a, 3*a)
>>> g(3)
g <== called by <module>
arguments: a=3
f <== called by g
arguments: a=3, b=6, c=9
f ==> returning to g
g ==> returning to <module>
###[The *enabled* parameter (default – *True*)](id:enabled-parameter)
The next most basic example:
>>> @log_calls(enabled=False)
... def f(a, b, c):
... pass
>>> f(1, 2, 3) # no output
The `enabled` setting is in fact an `int`. (Later, for example in
[Using *enabled* as a level of verbosity](#enabling-with-ints),
we show how this can be used advantageously.)
#### [True bypass](id:bypass)
If you supply a negative integer,
that is interpreted as *true bypass*: `log_calls` immediately calls
the decorated function and returns its value. When the value of `enabled`
is false (`False` or `0`), the decorator performs a little more processing
before deferring to the decorated function, though of course less than when
`enabled` is positive (e.g. `True`).
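    A quick sketch (not a doctest; `h` is a hypothetical function) of true bypass:
        @log_calls(enabled=-1)   # negative int: true bypass
        def h(a, b):
            return a + b
        h(1, 2)   # returns 3 immediately -- no log_calls output, and (as the
                  # stats section below shows) neither call counter is incremented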
###[The *args_sep* parameter (default – `', '`)](id:args_sep-parameter)
The `args_sep` parameter specifies the character or string used to separate
arguments. If the string ends in (or is) `\n`, additional whitespace
is appended so that arguments line up nicely:
>>> @log_calls(args_sep='\\n')
... def f(a, b, c, **kwargs):
... print(a + b + c)
>>> f(1, 2, 3, u='you') # doctest: +NORMALIZE_WHITESPACE, +SKIP
f <== called by <module>
arguments:
a=1
b=2
c=3
**kwargs={'u': 'you'}
6
f ==> returning to <module>
**NOTE**: *In all the doctest examples in this document, you'll see* `'\\n'`
*where in actual code you'd write* `'\n'`. *This is a `doctest` quirk: all
the examples herein work (as tests, they pass), and they would fail if*
`'\n'` *were used. The only alternative would be to use raw character strings
and write* `r'\n'`, *which is not obviously better.*
###[The *log_args* parameter (default – *True*)](id:log_args-parameter)
When true, as seen above, arguments passed to the decorated function are
logged. If the function's signature contains positional and/or keyword
"varargs" (`*args` and/or `**kwargs`), these are included if they're nonempty.
Any default values of keyword parameters with no corresponding argument are also
logged, on a separate line.
>>> @log_calls()
... def f_a(a, *args, something='that thing', **kwargs): pass
>>> f_a(1, 2, 3, foo='bar')
f_a <== called by <module>
arguments: a=1, *args=(2, 3), **kwargs={'foo': 'bar'}
defaults: something='that thing'
f_a ==> returning to <module>
Here, no argument information is logged at all:
>>> @log_calls(log_args=False)
... def f_b(a, *args, something='that thing', **kwargs): pass
>>> f_b(1, 2, 3, foo='bar')
f_b <== called by <module>
f_b ==> returning to <module>
If a function has no parameters, `log_calls` won't display any "arguments"
section:
>>> @log_calls()
... def f(): pass
>>> f()
f <== called by <module>
f ==> returning to <module>
If a function has parameters but is passed no arguments, `log_calls`
will display `arguments: <none>`, plus any default values used:
>>> @log_calls()
... def ff(*args, **kwargs): pass
>>> ff()
ff <== called by <module>
arguments: <none>
ff ==> returning to <module>
>>> @log_calls()
... def fff(*args, kw='doh', **kwargs): pass
>>> fff()
fff <== called by <module>
arguments: <none>
defaults: kw='doh'
fff ==> returning to <module>
###[The *log_retval* parameter (default – *False*)](id:log_retval-parameter)
When true, this parameter displays the value returned by the function:
>>> @log_calls(log_retval=True)
... def f(a, b, c):
... return a + b + c
>>> _ = f(1, 2, 3)
f <== called by <module>
arguments: a=1, b=2, c=3
f return value: 6
f ==> returning to <module>
Return values longer than 77 characters are truncated and end with
a trailing ellipsis:
>>> @log_calls(log_retval=True)
... def return_long_str():
... return '*' * 100
>>> return_long_str() # doctest: +NORMALIZE_WHITESPACE
return_long_str <== called by <module>
return_long_str return value: *****************************************************************************...
return_long_str ==> returning to <module>
'****************************************************************************************************'
###[The *log_exit* parameter (default – *True*)](id:log_exit-parameter)
When false, this parameter suppresses the `... ==> returning to ...` line
that indicates the function's return to its caller.
>>> @log_calls(log_exit=False)
... def f(a, b, c):
... return a + b + c
>>> _ = f(1, 2, 3)
f <== called by <module>
arguments: a=1, b=2, c=3
###[The *log_call_numbers* parameter (default – *False*)](id:log_call_numbers-parameter)
`log_calls` keeps a running tally of the number of times a decorated function
is called. You can display this (1-based) number using the `log_call_numbers` parameter:
>>> @log_calls(log_call_numbers=True)
... def f(): pass
>>> for i in range(2): f()
f [1] <== called by <module>
f [1] ==> returning to <module>
f [2] <== called by <module>
f [2] ==> returning to <module>
The call number is also displayed when `log_retval` is true:
>>> @log_calls(log_call_numbers=True, log_retval=True)
... def f():
... return 81
>>> _ = f()
f [1] <== called by <module>
f [1] return value: 81
f [1] ==> returning to <module>
This is particularly valuable in the presence of recursion, for example.
See the [recursion example](#recursion-example) later, where the feature
is used to good effect.
**NOTE**: *As we'll see later, logging for a decorated function
can be turned on and off dynamically. In fact,* `log_calls` *also tracks the total
number of calls to a decorated function, and that number is accessible too –
see the section on [the* `stats.num_calls_total` *attribute](#stats.num_calls_total).
When the* `log_call_numbers` *setting is true, the call number displayed is
the logged call number - the rank of that call among the calls to the function
when logging has been enabled. For example, suppose you call* `f` *17 times with logging
enabled and with* `log_call_numbers` *enabled; then you turn logging off and call* `f`
*3 times; finally you re-enable logging and call* `f` *again: the number displayed will
be 18, not 21.*
###[The *log_elapsed* parameter (default – *False*)](id:log_elapsed-parameter)
For performance profiling, you can measure the time it took a function to execute
by using the `log_elapsed` keyword. When true, `log_calls` reports the time the
decorated function took to complete, in seconds:
>>> @log_calls(log_elapsed=True)
... def f(n):
... for i in range(n):
... # do something time-critical
... pass
>>> f(5000) # doctest: +ELLIPSIS
f <== called by <module>
arguments: n=5000
elapsed time: ... [secs]
f ==> returning to <module>
###[The *indent* parameter (default - *False*)](id:indent-parameter)
The `indent` parameter, when true, indents each new level of logged messages
by 4 spaces, providing a visualization of the call hierarchy.
A decorated function's logged output is indented only as much as is necessary.
Here, the even numbered functions don't indent, so the indented functions
that they call are indented just one level more than their "inherited"
indentation level:
>>> @log_calls()
... def g1():
... pass
>>> @log_calls(indent=False) # no extra indentation for g1
... def g2():
... g1()
>>> @log_calls()
... def g3():
... g2()
>>> @log_calls(indent=False) # no extra indentation for g3
... def g4():
... g3()
>>> @log_calls()
... def g5():
... g4()
>>> g5()
g5 <== called by <module>
g4 <== called by g5
g3 <== called by g4
g2 <== called by g3
g1 <== called by g2
g1 ==> returning to g2
g2 ==> returning to g3
g3 ==> returning to g4
g4 ==> returning to g5
g5 ==> returning to <module>
###[The *prefix* parameter (default - `''`): decorating methods](id:prefix-parameter)
********* TODO REWORK DOCS for `prefix`
>>> @log_calls(prefix='*** ')
... def f(): pass
>>> f()
*** f <== called by <module>
*** f ==> returning to <module>
For methods, you don't need to manually specify a *classname* + `'.'` prefix:
the classname is, by default, part of the display name used for the method.
Any prefix you do supply is prepended to that:
>>> @log_calls(prefix='*** ')
... class Cls():
... def __init__(self): pass
>>> c = Cls() # doctest: +ELLIPSIS
*** Cls.__init__ <== called by <module>
arguments: self=<__main__.Cls object at 0x...>
*** Cls.__init__ ==> returning to <module>
The test suites `tests/test_log_calls_more.py`, `tests/test_log_calls__class_deco.py`
and `tests/test_log_calls_v30_minor_features_fixes.py` contains more examples of using
`log_calls` with methods of all kinds – instance methods, classmethods and staticmethods,
and properties.
###[The *file* parameter (default - *sys.stdout*)](id:file-parameter)
The `file` parameter specifies a stream (an instance of `io.TextIOBase`) to which
`log_calls` will print its messages. This value is supplied to the `file` keyword
parameter of the `print` function, and, like that parameter, its default value is
`sys.stdout`. This parameter is ignored if you've supplied a logger for output
using the [`logger`](#logger-parameter) parameter.
If your program writes to the console a lot, you may not want `log_calls` messages
    interspersed with your real output: your understanding of both logically distinct
    streams can be compromised, so it's better to make them two actually distinct streams.
It can also be advantageous to gather all, and only all, of the `log_calls` messages
in one place. You can use `indent=True` with a file, and the indentations will
appear as intended.
It's not simple to test this feature with doctest (in fact, there are subtleties
to supporting this feature and using doctest at all), so we'll just give an example
of writing to `stderr`, and reproduce the output:
>>> import sys
>>> @log_calls(file=sys.stderr, indent=True)
... def f(n):
... if n <= 0:
... return 'a'
... return '(' + f(n-1) + ')'
Running `>>> f(2)` will return '((a))' and will write the following to `stderr`:
f <== called by <module>
f <== called by f
arguments: n=1
f <== called by f
arguments: n=0
f ==> returning to f
f ==> returning to f
f ==> returning to <module>
"""
pass
# SURGERY:
main_basic.__doc__ = main_basic.__doc__.replace("__main__", __name__)
def main_logging():
"""
##[Using loggers](id:Logging)
`log_calls` works well with loggers obtained from Python's `logging` module –
that is, objects of type `logging.Logger`.
First, we'll set up a logger with a single handler that writes to the console.
Because `doctest` doesn't capture output written to `stderr` (the default stream
to which console handlers write), we'll send the console handler's output to
`stdout`, using the format `<loglevel>:<loggername>:<message>`.
>>> import logging
>>> import sys
>>> ch = logging.StreamHandler(stream=sys.stdout)
>>> c_formatter = logging.Formatter('%(levelname)8s:%(name)s:%(message)s')
>>> ch.setFormatter(c_formatter)
>>> logger = logging.getLogger('a_logger')
>>> logger.addHandler(ch)
>>> logger.setLevel(logging.DEBUG)
###The *logger* parameter (default – *None*)
The `logger` keyword parameter tells `log_calls` to write its output using
that logger rather than the `print` function:
>>> @log_calls(logger=logger)
... def somefunc(v1, v2):
... logger.debug(v1 + v2)
>>> somefunc(5, 16) # doctest: +NORMALIZE_WHITESPACE
DEBUG:a_logger:somefunc <== called by <module>
DEBUG:a_logger: arguments: v1=5, v2=16
DEBUG:a_logger:21
DEBUG:a_logger:somefunc ==> returning to <module>
>>> @log_calls(logger=logger)
... def anotherfunc():
... somefunc(17, 19)
>>> anotherfunc() # doctest: +NORMALIZE_WHITESPACE
DEBUG:a_logger:anotherfunc <== called by <module>
DEBUG:a_logger: somefunc <== called by anotherfunc
DEBUG:a_logger: arguments: v1=17, v2=19
DEBUG:a_logger:36
DEBUG:a_logger: somefunc ==> returning to anotherfunc
DEBUG:a_logger:anotherfunc ==> returning to <module>
The value of `logger` can be either a logger instance (a `logging.Logger`) or a string
giving the name of a logger. Instead of passing the logger instance
as above, we can simply pass `a_logger`:
>>> @log_calls(logger='a_logger')
... def yetanotherfunc():
... return 42
>>> _ = yetanotherfunc() # doctest: +NORMALIZE_WHITESPACE
DEBUG:a_logger:yetanotherfunc <== called by <module>
DEBUG:a_logger:yetanotherfunc ==> returning to <module>
This works because "all calls to [`logging.getLogger(name)`] with a given name
return the same logger instance", so that "logger instances never need to be
passed between different parts of an application"
as per the [Python documentation for
`logging.getLogger()`](https://docs.python.org/3/library/logging.html?highlight=logging.getlogger#logging.getLogger).
**NOTE**: *If the value of `logger` is a `Logger` instance that has no handlers
(which can happen if you specify a logger name for a (theretofore) nonexistent logger),
that logger won't be able to write anything, so `log_calls` will fall back to `print`.*
###The *loglevel* parameter (default – *logging.DEBUG*)
`log_calls` also takes a `loglevel` keyword parameter, an `int` whose value must be
one of the `logging` module's constants - `logging.DEBUG`, `logging.INFO`, etc.
– or a custom logging level if you've added any. `log_calls` writes output messages
using `logger.log(loglevel, …)`. Thus, if the `logger`'s log level is higher than
`loglevel`, no output will appear:
>>> logger.setLevel(logging.INFO) # raise logger's level to INFO
>>> @log_calls(logger='logger_=', loglevel=logging.DEBUG)
... def f(x, y, z, **kwargs):
... return y + z
>>> # No log_calls output from f
>>> # because loglevel for f < level of logger
>>> f(1,2,3, logger_=logger) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
5
The use of loggers, and of these parameters, is explored further in the later
example [Using a logger with multiple handlers that have different loglevels](#logging-multiple-handlers).
"""
pass
def main__call_chains():
"""
##Call chains
`log_calls` does its best to chase back along the call chain to find
the first *enabled* `log_calls`-decorated function on the stack.
If there's no such function, it just displays the immediate caller.
If there is such a function, however, when reporting calls and returns
it displays the entire list of functions on the stack up to and including
that function. Without this, you'd have to guess at what was called
in between calls to functions decorated by `log_calls`. If you specified
a prefix for the decorated caller on the end of a call chain, `log_calls`
will use the prefixed name:
>>> @log_calls()
... def g1():
... pass
>>> def g2():
... g1()
>>> @log_calls(prefix='mid.')
... def g3():
... g2()
>>> def g4():
... g3()
>>> @log_calls()
... def g5():
... g4()
>>> g5()
g5 <== called by <module>
mid.g3 <== called by g4 <== g5
g1 <== called by g2 <== mid.g3
g1 ==> returning to g2 ==> mid.g3
mid.g3 ==> returning to g4 ==> g5
g5 ==> returning to <module>
In the next example, `g` is `log_calls`-decorated but logging is disabled,
so the reported call chain for `f` stops at its immediate caller:
>>> @log_calls()
... def f(): pass
>>> def not_decorated(): f()
>>> @log_calls(enabled=False, log_call_numbers=True)
... def g(): not_decorated()
>>> g()
f <== called by not_decorated
f ==> returning to not_decorated
Elaborating on the previous example, here are longer call chains with an
intermediate decorated function that has logging disabled:
>>> @log_calls()
... def e(): pass
>>> def not_decorated_call_e(): e()
>>> @log_calls()
... def f(): not_decorated_call_e()
>>> def not_decorated_call_f(): f()
>>> @log_calls(enabled=False, log_call_numbers=True)
... def g(): not_decorated_call_f()
>>> @log_calls()
... def h(): g()
>>> h()
h <== called by <module>
f <== called by not_decorated_call_f <== g <== h
e <== called by not_decorated_call_e <== f
e ==> returning to not_decorated_call_e ==> f
f ==> returning to not_decorated_call_f ==> g ==> h
h ==> returning to <module>
Finally, a test with decorated functions in the call chain for which
logging is "bypassed":
>>> @log_calls()
... def h1():
... pass
>>> @log_calls(enabled=-1)
... def h2():
... h1()
>>> @log_calls()
... def h3():
... h2()
>>> @log_calls(enabled=-1)
... def h4():
... h3()
>>> @log_calls()
... def h5():
... h4()
>>> h5()
h5 <== called by <module>
h3 <== called by h4 <== h5
h1 <== called by h2 <== h3
h1 ==> returning to h2 ==> h3
h3 ==> returning to h4 ==> h5
h5 ==> returning to <module>
###[Another *indent* example](id:indent-parameter-another)
In the next example, `g3` has logging disabled, so calls to it are not logged.
`log_calls` chases back to the nearest *enabled* decorated function, so that there
aren't gaps between call chains. The indentation levels are as you'd hope them to be:
>>> @log_calls(indent=True)
... def g1():
... pass
>>> def g2():
... g1()
>>> @log_calls(enabled=False, indent=True) # not logged, causes no indentation for g1
... def g3():
... g2()
>>> @log_calls(indent=True)
... def g4():
... g3()
>>> @log_calls(indent=True)
... def g5():
... g4()
>>> g5()
g5 <== called by <module>
g4 <== called by g5
g1 <== called by g2 <== g3 <== g4
g1 ==> returning to g2 ==> g3 ==> g4
g4 ==> returning to g5
g5 ==> returning to <module>
We'll continue to use `indent` throughout this section.
###Call chains and inner functions
When chasing back along the stack, `log_calls` also detects inner functions
that it has decorated:
>>> @log_calls(indent=True)
... def h0(z):
... pass
>>> def h1(x):
... @log_calls(indent=True)
... def h1_inner(y):
... h0(x*y)
... return h1_inner
>>> def h2():
... h1(2)(3)
>>> def h3():
... h2()
>>> def h4():
... @log_calls(indent=True)
... def h4_inner():
... h3()
... return h4_inner
>>> @log_calls(indent=True)
... def h5():
... h4()()
>>> h5()
h5 <== called by <module>
h4.<locals>.h4_inner <== called by h5
h1.<locals>.h1_inner <== called by h2 <== h3 <== h4.<locals>.h4_inner
arguments: y=3
h0 <== called by h1.<locals>.h1_inner
arguments: z=6
h0 ==> returning to h1.<locals>.h1_inner
h1.<locals>.h1_inner ==> returning to h2 ==> h3 ==> h4.<locals>.h4_inner
h4.<locals>.h4_inner ==> returning to h5
h5 ==> returning to <module>
... even when the inner function is called from within the outer function
it's defined in:
>>> @log_calls(indent=True)
... def j0():
... pass
>>> def j1():
... j0()
>>> def j2():
... @log_calls(indent=True)
... def j2_inner():
... j1()
... j2_inner()
>>> @log_calls(indent=True)
... def j3():
... j2()
>>> j3()
j3 <== called by <module>
j2.<locals>.j2_inner <== called by j2 <== j3
j0 <== called by j1 <== j2.<locals>.j2_inner
j0 ==> returning to j1 ==> j2.<locals>.j2_inner
j2.<locals>.j2_inner ==> returning to j2 ==> j3
j3 ==> returning to <module>
###Call chains and *log_call_numbers*
If a decorated function `g` calls another decorated function `f`,
and if `f` is enabled and has `log_call_numbers` set to true,
then the call number of f will be displayed in the call chain:
>>> @log_calls()
... def f(): pass
>>> def not_decorated(): f()
>>> @log_calls(log_call_numbers=True)
... def g(): not_decorated()
>>> g()
g [1] <== called by <module>
f <== called by not_decorated <== g [1]
f ==> returning to not_decorated ==> g [1]
g [1] ==> returning to <module>
###[Indentation and call numbers with recursion](id:recursion-example)
These features are especially useful in recursive and mutually recursive
situations. We have to use `OrderedDict`s here because they're more
doctest-friendly:
>>> from collections import OrderedDict
>>> @log_calls(log_call_numbers=True, log_retval=True, indent=True)
... def depth(d, key=None):
... if not isinstance(d, dict):
... return 0 # base case
... elif not d:
... return 1
... else:
... return max(map(depth, d.values(), d.keys())) + 1
>>> depth(
... OrderedDict(
... (('a', 0),
... ('b', OrderedDict( (('c1', 10), ('c2', 11)) )),
... ('c', 'text'))
... )
... )
depth [1] <== called by <module>
arguments: d=OrderedDict([('a', 0), ('b', OrderedDict([('c1', 10), ('c2', 11)])), ('c', 'text')])
defaults: key=None
depth [2] <== called by depth [1]
arguments: d=0, key='a'
depth [2] return value: 0
depth [2] ==> returning to depth [1]
depth [3] <== called by depth [1]
arguments: d=OrderedDict([('c1', 10), ('c2', 11)]), key='b'
depth [4] <== called by depth [3]
arguments: d=10, key='c1'
depth [4] return value: 0
depth [4] ==> returning to depth [3]
depth [5] <== called by depth [3]
arguments: d=11, key='c2'
depth [5] return value: 0
depth [5] ==> returning to depth [3]
depth [3] return value: 1
depth [3] ==> returning to depth [1]
depth [6] <== called by depth [1]
arguments: d='text', key='c'
depth [6] return value: 0
depth [6] ==> returning to depth [1]
depth [1] return value: 2
depth [1] ==> returning to <module>
2
**NOTE**: *The optional* `key` *parameter is for instructional purposes,
so you can see the key that's paired with the value of* `d` *in the caller's
dictionary. Typically the signature of this function would be just* `def depth(d)`,
*and the recursive case would return* `max(map(depth, d.values())) + 1`.
"""
pass
def main__log_message():
"""
## [The indent-aware writing method *log_message()*](id:log_message)
`log_calls` exposes the method it uses to write its messages, `log_message`,
whose full signature is:
`log_message(msg, *msgs, sep=' ',
extra_indent_level=1, prefix_with_name=False)`
This method takes one or more "messages" (anything you want to see as a string),
and writes one final output message formed by joining those messages separated
by `sep`.
`extra_indent_level` is a number of 4-column-wide *indent levels* specifying
where to begin writing that message. This value x 4 is an offset in columns
from the left margin of the visual frame established by log_calls – that is,
an offset from the column in which the function entry/exit messages begin. The default
of 1 aligns the message with the "arguments: " line of `log_calls`'s output.
`prefix_with_name` is a `bool`. If true, the final message is prefaced with the
possibly prefixed name of the function (using the `prefix` setting),
plus possibly its call number in square brackets (if the `log_call_numbers` setting
is true).
If a decorated function or method writes debugging messages, even multiline
messages, it can use this method to write them so that they sit nicely within
the frame provided by `log_calls`.
Consider the following function:
>>> @log_calls(indent=True, log_call_numbers=True)
... def f(n):
... if n <= 0:
... print("*** Base case n <= 0")
... else:
... print("*** n=%d is %s,\\n but we knew that."
... % (n, "odd" if n%2 else "even"))
... print("*** (n=%d) We'll be right back, after this:" % n)
... f(n-1)
... print("*** (n=%d) We're back." % n)
>>> f(2)
f [1] <== called by <module>
arguments: n=2
*** n=2 is even,
but we knew that.
*** (n=2) We'll be right back, after this:
f [2] <== called by f [1]
arguments: n=1
*** n=1 is odd,
but we knew that.
*** (n=1) We'll be right back, after this:
f [3] <== called by f [2]
arguments: n=0
*** Base case n <= 0
f [3] ==> returning to f [2]
*** (n=1) We're back.
f [2] ==> returning to f [1]
*** (n=2) We're back.
f [1] ==> returning to <module>
The debugging messages written by `f` literally "stick out", and it becomes difficult,
especially in more complex situations with multiple functions and methods,
to figure out who actually wrote which message; hence the "(n=%d)" tag. If instead
`f` uses `log_message`, all of its messages from each invocation align neatly
within the `log_calls` visual frame. We take this opportunity to also
illustrate the keyword parameters of `log_message`:
>>> @log_calls(indent=True, log_call_numbers=True)
... def f(n):
... if n <= 0:
... f.log_message("Base case n =", n, prefix_with_name=True)
... else:
... f.log_message("*** n=%d is %s,\\n but we knew that."
... % (n, "odd" if n%2 else "even"),
... extra_indent_level=0)
... f.log_message("We'll be right back", "after this:",
... sep=", ", prefix_with_name=True)
... f(n-1)
... f.log_message("We're back.", prefix_with_name=True)
>>> f(2)
f [1] <== called by <module>
arguments: n=2
*** n=2 is even,
but we knew that.
f [1]: We'll be right back, after this:
f [2] <== called by f [1]
arguments: n=1
*** n=1 is odd,
but we knew that.
f [2]: We'll be right back, after this:
f [3] <== called by f [2]
arguments: n=0
f [3]: Base case n = 0
f [3] ==> returning to f [2]
f [2]: We're back.
f [2] ==> returning to f [1]
f [1]: We're back.
f [1] ==> returning to <module>
The `log_message()` method works whether the output destination is `stdout`,
another stream, a file, or a logger. The test file `test_log_calls_more.py`
contains an example `main__log_message__all_possible_output_destinations()`
which illustrates that.
**NOTE**: *In the example above, `f` accesses one of its attributes added by
`log_calls`, namely, the `log_message()` method. (`log_calls` in fact adds two
more attributes, discussed in subsequent sections:
[`log_calls_settings`](#Dynamic-control-log_calls_settings) and [`stats`](#call-history-and-statistics).)
Indeed, any function, and any static method, can access its `log_calls` attributes
in the same syntactically straightforward way. Classmethods and instance methods
decorated by `log_calls` can also use `log_message()`, but each of those kinds
of methods requires its own approach (a little more syntax) to obtaining the
`log_calls` wrapper which hosts the attributes. See the section
[Functions and methods accessing their own *log_calls* attributes](#accessing-own-attrs) for details.*
"""
pass
def main__using_log_calls_settings__dynamic_control_of_settings():
"""
##[Dynamic control of settings using the *log_calls_settings* attribute](id:Dynamic-control-log_calls_settings)
The values given for the parameters of `log_calls`, e.g. `enabled=True`,
`args_sep=" / "`, are set once the decorated function is interpreted.
The values are established once and for all when the Python interpreter
parses the definition of a decorated function and creates a function object.
###[The problem](id:problem)
Even if a variable is used as a parameter value, its value at the time
Python processes the definition is "frozen" for the created function object.
Subsequently changing the value of the variable will *not* affect the behavior
of the decorator.
For example, suppose `DEBUG` is a module-level variable initialized to `False`:
>>> DEBUG = False
and you use this code:
>>> @log_calls(enabled=DEBUG)
... def foo(**kwargs):
... pass
>>> foo() # No log_calls output: DEBUG is False
    If later you set `DEBUG = True` and call `foo`, nothing will be written,
because `foo`'s *enabled* setting is bound to the original value
of `DEBUG`, established when the definition was processed:
>>> DEBUG = True
>>> foo() # Still no log_calls output
This is simply how Python processes default values.
###Solutions
`log_calls` provides *two* ways to dynamically control the settings of a decorated function.
This section presents one of them – using `log_calls_settings`. The next section,
on [indirect values](#Indirect-values), discusses another, rather different solution,
one that's more intrusive but which affords even more control.
###The *log_calls_settings* attribute
The `log_calls` decorator adds an attribute `log_calls_settings`
to a decorated function, through which you can access the decorator settings
for that function. This attribute is an object which lets you control
the settings for a decorated function via a mapping (dict-like) interface,
and equivalently, via attributes of the object. The mapping keys and
the attribute names are simply the `log_calls` keywords. `log_calls_settings`
also implements many of the standard `dict` methods for interacting with the
settings in familiar ways.
###The mapping interface and the attribute interface to settings
Once you've decorated a function with `log_calls`,
>>> @log_calls()
... def f(*args, **kwargs):
... return 91
you can access and change its settings via the `log_calls_settings` attribute
of the decorated function, which behaves like a dictionary. You can read and
write settings using the `log_calls` keywords as keys:
>>> f.log_calls_settings['enabled']
True
>>> f.log_calls_settings['enabled'] = False
>>> _ = f() # no output (not even 91, because of "_ = ")
>>> f.log_calls_settings['enabled']
False
>>> f.log_calls_settings['log_retval']
False
>>> f.log_calls_settings['log_retval'] = True
>>> f.log_calls_settings['log_elapsed']
False
>>> f.log_calls_settings['log_elapsed'] = True
The `log_calls_settings` attribute has a length:
>>> len(f.log_calls_settings)
15
Its keys and items can be iterated through:
>>> keys = []
>>> for k in f.log_calls_settings: keys.append(k)
>>> keys # doctest: +NORMALIZE_WHITESPACE
['enabled', 'args_sep', 'log_args',
'log_retval', 'log_elapsed', 'log_exit',
'indent', 'log_call_numbers',
'prefix', 'file',
'logger', 'loglevel', 'mute',
'record_history', 'max_history']
>>> list(f.log_calls_settings.items()) # doctest: +NORMALIZE_WHITESPACE
[('enabled', False), ('args_sep', ', '), ('log_args', True),
('log_retval', True), ('log_elapsed', True), ('log_exit', True),
('indent', True), ('log_call_numbers', False),
('prefix', ''), ('file', None),
('logger', None), ('loglevel', 10),
('mute', False),
('record_history', False), ('max_history', 0)]
You can use `in` to test for key membership:
>>> 'enabled' in f.log_calls_settings
True
>>> 'no_such_setting' in f.log_calls_settings
False
As with an ordinary dictionary, attempting to access the value
of a nonexistent setting raises `KeyError`:
>>> f.log_calls_settings['new_key'] # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
KeyError: ...
Unlike an ordinary dictionary, you can't add new keys – the `log_calls_settings`
dictionary is closed to new members, and attempts to add one will raise `KeyError`:
>>> f.log_calls_settings['new_key'] = 'anything' # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
KeyError: ...
You can use the same keywords as attributes of `log_calls_settings`
instead of as keywords to the mapping interface; they're equivalent:
>>> f.log_calls_settings.log_elapsed
True
>>> f.log_calls_settings.log_call_numbers
False
>>> f.log_calls_settings.log_call_numbers = True
>>> f.log_calls_settings.enabled = True # turn it back on!
>>> _ = f() # doctest: +ELLIPSIS
f [1] <== called by <module>
arguments: <none>
f [1] return value: 91
elapsed time: ... [secs], process time: ... [secs]
f [1] ==> returning to <module>
>>> f.log_calls_settings.log_args = False
>>> f.log_calls_settings.log_elapsed = False
>>> f.log_calls_settings.log_retval = False
>>> f() # doctest: +ELLIPSIS
f [2] <== called by <module>
f [2] ==> returning to <module>
91
The only difference is that you *can* add a new attribute to `log_calls_settings`,
simply by using it:
>>> f.log_calls_settings.new_attr = 'something'
>>> f.log_calls_settings.new_attr
'something'
But the new attribute still isn't a decorator setting:
>>> 'new_attr' in f.log_calls_settings
False
### The *update()*, *as_OD()* and *as_dict()* methods
The `log_calls_settings` object provides an `update()` method so that
you can update several settings at once:
>>> f.log_calls_settings.update(
... log_args=True, log_elapsed=False, log_call_numbers=False,
... log_retval=False)
>>> _ = f()
f <== called by <module>
arguments: <none>
f ==> returning to <module>
You can retrieve the entire collection of settings as either an `OrderedDict`
using the `as_OD()` method, or as a `dict` using `as_dict()`.
Either can serve as a snapshot of the settings, so that you can change settings
temporarily, use the new settings, and then restore settings from the snapshot.
    In addition to taking keyword arguments, as shown above, the `update()` method
can take one or more dicts – in particular, a dictionary retrieved from one of
the `as_*` methods. For example:
Retrieve settings (here, as an `OrderedDict` because it's more doctest-friendly,
but using `as_dict()` is sufficient):
>>> od = f.log_calls_settings.as_OD()
>>> od # doctest: +NORMALIZE_WHITESPACE
OrderedDict([('enabled', True), ('args_sep', ', '),
('log_args', True), ('log_retval', False),
('log_elapsed', False), ('log_exit', True),
('indent', True), ('log_call_numbers', False),
('prefix', ''), ('file', None),
('logger', None), ('loglevel', 10),
('mute', False),
('record_history', False), ('max_history', 0)])
Change settings temporarily:
>>> f.log_calls_settings.update(
... log_args=False, log_elapsed=True, log_call_numbers=True,
... log_retval=True)
Use the new settings for `f`:
>>> _ = f() # doctest: +ELLIPSIS
f [4] <== called by <module>
f [4] return value: 91
elapsed time: ... [secs], process time: ... [secs]
f [4] ==> returning to <module>
Now restore original settings, this time passing the retrieved settings
dictionary rather than keywords:
>>> f.log_calls_settings.update(od)
>>> od == f.log_calls_settings.as_OD()
True
--------
You can also update with `log_calls.get_defaults_OD()`:
>>> f.log_calls_settings.update(log_calls.get_defaults_OD())
>>> f.log_calls_settings.as_OD() == log_calls.get_defaults_OD()
True
or even with `log_calls.get_factory_defaults_OD()`, though that will be the same `OrderedDict`
as `log_calls.get_defaults_OD()` unless you have changed defaults using `log_calls.set_defaults(...)`.
**NOTES**:
1. *The [`prefix`](#prefix-parameter) and [`max_history`](#max_history-parameter)
settings are "immutable" (no other settings are), and attempts to change them
directly (e.g.* `f.log_calls_settings.max_history = anything`) *raise* `ValueError`.
*Nevertheless, they* are *items in the retrieved settings dictionaries. To allow for
the use-case just illustrated, `update()` is considerate enough to skip over
immutable settings.*
2. `log_calls` *continues to track call numbers even when it isn't reporting
them: it tracks them regardless of the `log_call_numbers` setting. Thus, the
last call to* `f` *was the 4th, as shown, although the call number of the 3rd
call wasn't displayed.*
"""
def main__indirect_parameter_values__dynamic_control():
"""
##[Dynamic control of settings with indirect values](id:Indirect-values)
Every parameter of `log_calls` except `prefix` and `max_history` can take
two kinds of values: *direct* and *indirect*, which you can think of as
*static* and *dynamic* respectively. Direct/static values are actual values
used when the decorated function is interpreted, e.g. `enabled=True`,
`args_sep=" / "`. As discussed in the previous section on
[`log_call_settings`](#Dynamic-control-log_calls_settings), the values of
parameters are set once and for all when the Python interpreter creates
a function object from the source code of a decorated function. Even if you
use a variable as the value of a setting, subsequently changing the variable's
value has no effect on the decorator's setting.
`log_calls` provides a second way to overcome this limitation. The decorator
lets you specify any parameter
except `prefix` or `max_history` with one level of indirection, by using
*indirect values*: an indirect value is a string that names a keyword argument
*of the decorated function*. It can be an explicit keyword argument present
in the signature of the function, or an implicit keyword argument that ends up
in `**kwargs` (if that's present in the function's signature). When the decorated
function is called, the arguments passed by keyword, and the decorated function's
explicit keyword parameters with default values, are both searched for the named
parameter; if it is found and of the correct type, *its* value is used; otherwise
a default value is used.
To specify an indirect value for a parameter whose normal values are or can be `str`s (only
`args_sep` and `logger`, at present), append an `'='` to the value. For consistency,
any indirect value can end in a trailing `'='`, which is stripped. Thus,
`enabled='enable_='` indicates an indirect value *to be supplied* by the keyword
(argument or parameter) `enable_` of a decorated function.
So, in:
>>> @log_calls(args_sep='sep=', prefix="*** ")
... def f(a, b, c, sep='|'): pass
`args_sep` has an indirect value which names `f`'s explicit keyword parameter
`sep`, and `prefix` has a direct value as it always does. A call can dynamically
override the default value '|' in the signature of `f` by supplying a value:
>>> f(1, 2, 3, sep=' / ')
*** f <== called by <module>
arguments: a=1 / b=2 / c=3 / sep=' / '
*** f ==> returning to <module>
or it can use `f`'s default value by not supplying a `sep` argument:
>>> f(1, 2, 3)
*** f <== called by <module>
arguments: a=1|b=2|c=3
defaults: sep='|'
*** f ==> returning to <module>
*A decorated function doesn't have to explicitly declare the parameter
named as an indirect value*, if its signature includes `**kwargs`:
the intermediate parameter can be an implicit keyword parameter,
passed by a caller but not present in the function's signature.
Consider:
>>> @log_calls(enabled='enable')
... def func1(a, b, c, **func1_kwargs): pass
>>> @log_calls(enabled='enable')
... def func2(z, **func2_kwargs): func1(z, z+1, z+2, **func2_kwargs)
When the following statement is executed, the calls to both func1 and func2
will be logged:
>>> func2(17, enable=True)
func2 <== called by <module>
arguments: z=17, **func2_kwargs={'enable': True}
func1 <== called by func2
arguments: a=17, b=18, c=19, **func1_kwargs={'enable': True}
func1 ==> returning to func2
func2 ==> returning to <module>
whereas neither of the following two statements will trigger logging:
>>> func2(42, enable=False) # no log_calls output
>>> func2(99) # no log_calls output
**NOTE**: *This last example illustrates a subtle point:
if you omit the* `enabled` *parameter altogether, logging will occur,
as the default value is (the direct value)* `True`; *however, if you
specify an indirect value for* `enabled` *and the named indirect
keyword is not supplied in a call, then that call* won't *be logged.
In other words, if you specify an indirect value for the* `enabled` *parameter
then the effective default value of the enabled setting is* `False`* --
calls are not logged unless the named parameter is found and its value is true.*
###Controlling format 'from above'
This indirection mechanism allows a calling function to control the appearance
of logged calls to functions lower in the call chain, provided they all use
the same indirect parameter keywords.
In the next example, the separator value supplied to `g` by keyword argument
propagates to `f`. Note that the arguments `42` and `99` end up in `g`'s
positional *varargs* tuple. We've used non-generic names for the *varargs*
to illustrate that whatever you call these parameters, their roles are
unambiguous and `log_calls` will find and use their names:
>>> @log_calls(args_sep='sep=')
... def f(a, b, c, **kwargs): pass
>>> @log_calls(args_sep='sep=')
... def g(a, b, c, *g_args, **g_kwargs):
... f(a, b, c, **g_kwargs)
>>> g(1,2,3, 42, 99, sep='\\n') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
g <== called by <module>
arguments:
a=1
b=2
c=3
*g_args=(42, 99)
**g_kwargs={'sep': '\\n'}
f <== called by g
arguments:
a=1
b=2
c=3
**kwargs={'sep': '\\n'}
f ==> returning to g
g ==> returning to <module>
####Controlling indentation 'from above'
Similarly, you can control indentation from above.
>>> @log_calls(indent='lc_indent', log_call_numbers=True)
... def f(n, **kwargs):
... if n <= 0:
... return
... f(n-1, **kwargs)
>>> @log_calls(indent='lc_indent')
... def g(n, **kwargs):
... f(n+1, **kwargs)
Without an indirect value for `indent`, `log_calls` displays the calls to
`f` and `g` in a "flat" way:
>>> g(1) #, lc_indent=True)
g <== called by <module>
arguments: n=1
f [1] <== called by g
arguments: n=2
f [2] <== called by f [1]
arguments: n=1
f [3] <== called by f [2]
arguments: n=0
f [3] ==> returning to f [2]
f [2] ==> returning to f [1]
f [1] ==> returning to g
g ==> returning to <module>
but the call hierarchy is represented visually when you pass the specified
indirect value:
>>> g(2, lc_indent=True)
g <== called by <module>
arguments: n=2, **kwargs={'lc_indent': True}
f [4] <== called by g
arguments: n=3, **kwargs={'lc_indent': True}
f [5] <== called by f [4]
arguments: n=2, **kwargs={'lc_indent': True}
f [6] <== called by f [5]
arguments: n=1, **kwargs={'lc_indent': True}
f [7] <== called by f [6]
arguments: n=0, **kwargs={'lc_indent': True}
f [7] ==> returning to f [6]
f [6] ==> returning to f [5]
f [5] ==> returning to f [4]
f [4] ==> returning to g
g ==> returning to <module>
###Enabling with *int*s rather than *bool*s
Sometimes it's desirable for a function to print or log debugging messages
as it executes. It's the oldest form of debugging! Instead of a simple `bool`,
you can use a nonnegative `int` as the enabling value and treat it as a level
of verbosity.
>>> DEBUG_MSG_BASIC = 1
>>> DEBUG_MSG_VERBOSE = 2
>>> DEBUG_MSG_MOREVERBOSE = 3 # etc.
>>> @log_calls(enabled='debuglevel=')
... def do_stuff_with_commentary(*args, debuglevel=0):
... if debuglevel >= DEBUG_MSG_VERBOSE:
... print("*** extra debugging info ***")
No output:
>>> do_stuff_with_commentary()
Only `log_calls` output:
>>> do_stuff_with_commentary(debuglevel=DEBUG_MSG_BASIC)
do_stuff_with_commentary <== called by <module>
arguments: debuglevel=1
do_stuff_with_commentary ==> returning to <module>
`log_calls` output plus the function's debugging reportage:
>>> do_stuff_with_commentary(debuglevel=DEBUG_MSG_VERBOSE)
do_stuff_with_commentary <== called by <module>
arguments: debuglevel=2
*** extra debugging info ***
do_stuff_with_commentary ==> returning to <module>
The [metaclass example](#A-metaclass-example) below also makes use of this technique.
### Using *log_calls_settings* to set indirect values
    Assigning an indirect value to a setting via `log_calls_settings` is perfectly legitimate:
>>> @log_calls(enabled=False)
... def g(*args, **kwargs):
... return sum(args)
>>> g.log_calls_settings.enabled = 'enable_log_calls='
>>> g(1, 2, 3, enable_log_calls=True)
g <== called by <module>
arguments: *args=(1, 2, 3), **kwargs={'enable_log_calls': True}
g ==> returning to <module>
6
"""
pass
def main_call_history_and_statistics():
"""
##[Call history and statistics – the *stats* attribute and the *\*_history* parameters](id:call-history-and-statistics)
Unless it's [bypassed](#bypass),`log_calls` always collects at least
a few basic statistics about calls to a decorated function.
It can collect the entire history of calls to a function if asked
to (using the [`record_history` parameter](#record_history-parameter)).
The statistics and history are accessible via the `stats` attribute
which `log_calls` adds to a decorated function.
###The *stats* attribute
The `stats` attribute is a collection of read-only performance and profiling
data attributes, plus one method.
The class of the `stats` has its own test suite,
so here we only illustrate and discuss its use by `log_calls`.
Define a decorated function with call number logging turned on,
but with exit logging turned off for brevity:
>>> @log_calls(log_call_numbers=True, log_exit=False)
... def f(a, *args, x=1, **kwargs): pass
Let's call it 2 times:
>>> f(0)
f [1] <== called by <module>
arguments: a=0
defaults: x=1
>>> f(1, 100, 101, x=1000, y=1001)
f [2] <== called by <module>
arguments: a=1, *args=(100, 101), x=1000, **kwargs={'y': 1001}
and explore its `stats`.
###The *num_calls_logged* attribute
The `stats.num_calls_logged` attribute contains the number of the most
recent logged call to a decorated function. Thus, `f.stats.num_calls_logged`
will equal 2:
>>> f.stats.num_calls_logged
2
This counter gets incremented when a decorated function is called that has
logging enabled, even if its `log_call_numbers` setting is false.
###[The *num_calls_total* attribute](id:stats.num_calls_total)
The `stats.num_calls_total` attribute holds the *total* number of calls
to a decorated function. This counter gets incremented even when logging
is disabled for a function (`enabled` == 0), but **not** if logging is "bypassed"
(`enabled` < 0).
For example, let's now *disable* logging for `f` and call it 3 more times:
>>> f.log_calls_settings.enabled = False
>>> for i in range(3): f(i)
Now `stats.num_calls_total` will equal 5, but `f.stats.num_calls_logged`
will still equal 2:
>>> f.stats.num_calls_total
5
>>> f.stats.num_calls_logged
2
As a further illustration, let's re-enable logging for `f` and call it again.
The displayed call number will the number of the *logged* call, 3, the same
value as `f.stats.num_calls_logged` after (and during!) the call:
>>> f.log_calls_settings.enabled = True
>>> f(10, 20, z=5000)
f [3] <== called by <module>
arguments: a=10, *args=(20,), **kwargs={'z': 5000}
defaults: x=1
>>> f.stats.num_calls_total
6
>>> f.stats.num_calls_logged
3
**ATTENTION**: *Thus,* `log_calls` *has some overhead even when it's disabled,
though of course less than when it's enabled.* **Comment it out in production code!**
Finally, let's verify the claim that "bypassing" logging turns off tallying
of both call counters:
>>> f.log_calls_settings.enabled = -1
>>> f(10, 20, z=5000) # no `log_calls` output
>>> f.stats.num_calls_total
6
>>> f.stats.num_calls_logged
3
Before moving on, we'll restore logging for `f`:
>>> f.log_calls_settings.enabled = True
###The *stats.elapsed_secs_logged* attribute
The `stats.elapsed_secs_logged` attribute holds the sum of the elapsed times
of all logged calls to a decorated function, in seconds. It's
not possible to doctest this so we'll just exhibit its value for the 3 logged
calls to `f` above:
>>> f.stats.elapsed_secs_logged # doctest: +SKIP
1.1463998816907406e-05
###The *stats.process_secs_logged* attribute
The `stats.process_secs_logged` attribute holds the sum of the "process times" of all logged calls to a decorated function, in seconds.
Similarly, we'll just exhibit its value for the 3 logged calls to `f` above:
>>> f.stats.process_secs_logged # doctest: +SKIP
1.1000000000038757e-05
###[The *record_history* parameter (default – *False*)](id:record_history-parameter)
When the `record_history` setting is true for a decorated function `f`, `log_calls` will
retain a sequence of records holding the details of each logged call to that function.
That history is accessible via attributes of the `stats` object. We'll illustrate
with a familiar example.
Let's define `f` just as before, but with `record_history` set to true:
>>> @log_calls(record_history=True, log_call_numbers=True, log_exit=False)
... def f(a, *args, x=1, **kwargs): pass
With logging enabled, let's call `f` three times:
>>> f(0)
f [1] <== called by <module>
arguments: a=0
defaults: x=1
>>> f(1, 100, 101, x=1000, y=1001)
f [2] <== called by <module>
arguments: a=1, *args=(100, 101), x=1000, **kwargs={'y': 1001}
>>> f(10, 20, z=5000)
f [3] <== called by <module>
arguments: a=10, *args=(20,), **kwargs={'z': 5000}
defaults: x=1
No surprises there. But now, `f` has a call history, which we'll examine next.
####The *stats.history* attribute
The `stats.history` attribute of a decorated function provides the call history
of logged calls to the function as a tuple of records. Here's `f`'s history,
hand-formatted for readability:
>>> print('\\n'.join(map(str, f.stats.history))) # doctest: +SKIP
CallRecord(call_num=1, argnames=['a'], argvals=(0,), varargs=(),
explicit_kwargs=OrderedDict(),
defaulted_kwargs=OrderedDict([('x', 1)]), implicit_kwargs={},
retval=None,
elapsed_secs=3.0049995984882116e-06,
process_secs=2.9999999999752447e-06,
timestamp='10/28/14 15:56:13.733763',
prefixed_func_name='f', caller_chain=['<module>'])
CallRecord(call_num=2, argnames=['a'], argvals=(1,), varargs=(100, 101),
explicit_kwargs=OrderedDict([('x', 1000)]),
defaulted_kwargs=OrderedDict(), implicit_kwargs={'y': 1001},
retval=None,
elapsed_secs=3.274002665420994e-06,
process_secs=3.0000000000030003e-06,
timestamp='10/28/14 15:56:13.734102',
prefixed_func_name='f', caller_chain=['<module>'])
CallRecord(call_num=3, argnames=['a'], argvals=(10,), varargs=(20,),
explicit_kwargs=OrderedDict(),
defaulted_kwargs=OrderedDict([('x', 1)]), implicit_kwargs={'z': 5000},
retval=None,
elapsed_secs=2.8769973141606897e-06,
process_secs=2.9999999999752447e-06,
timestamp='10/28/14 15:56:13.734412',
prefixed_func_name='f', caller_chain=['<module>'])
The CSV representation (available as `stats.history_as_csv`, discussed below) pairs
the `argnames` with their values in `argvals` (the `argnames` become column headings),
making it even more human-readable, especially when viewed in a program that
presents CSVs nicely.
####The *CallRecord* namedtuple
For the record, the records that comprise a decorated function's history are
`namedtuple`s of type `CallRecord`, whose fields are:
>>> from log_calls import CallRecord
>>> print('\\n'.join(CallRecord._fields))
call_num
argnames
argvals
varargs
explicit_kwargs
defaulted_kwargs
implicit_kwargs
retval
elapsed_secs
process_secs
timestamp
prefixed_func_name
caller_chain
By now, the significance of each field should be clear.
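Because the records are namedtuples, individual fields can be read off by name.
For instance, for the history built above:
>>> f.stats.history[0].call_num
1
>>> f.stats.history[-1].varargs
(20,)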
####[*stats.elapsed_secs_logged* == sum of *elapsed_secs* "column" of *stats.history*](id:elapsed_secs_logged-equal-sum-etc)
This equality holds, as you would expect. Similarly, `stats.process_secs_logged` ==
sum of the `process_secs` "column" of `stats.history`. This is
[demonstrated](./record_history.html#elapsed_secs_logged-equal-sum-etc)
in the documentation for the `record_history` decorator, a subset of
`log_calls` which records call history and statistics but writes no messages.
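A quick sanity check of that identity (skipped as a doctest because the values
are timing-dependent):
>>> sum(r.elapsed_secs for r in f.stats.history) == f.stats.elapsed_secs_logged # doctest: +SKIP
True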
###[The *max_history* parameter (default – 0)](id:max_history-parameter)
The `max_history` parameter determines how many call history records are retained
for a decorated function whose call history is recorded. If this value is 0
(the default) or negative, an unbounded number of records is retained (unless or until
you set the `record_history` setting to false, or call the
[`stats.clear_history()`](#stats.clear_history) method). If the value of `max_history`
is > 0, `log_calls` will retain at most that many records, discarding the oldest
records to make room for newer ones if the history reaches capacity.
An example:
>>> @log_calls(record_history=True, max_history=2,
... log_args=False, log_exit=False, log_call_numbers=True)
... def g(a): pass
>>> for i in range(3): g(i)
g [1] <== called by <module>
g [2] <== called by <module>
g [3] <== called by <module>
Here's `g`'s call history:
>>> print('\\n'.join(map(str, g.stats.history))) # doctest: +SKIP
CallRecord(call_num=2, argnames=['a'], argvals=(1,), varargs=(),
explicit_kwargs=OrderedDict(),
defaulted_kwargs=OrderedDict(), implicit_kwargs={},
retval=None,
elapsed_secs=2.239001332782209e-06,
process_secs=2.000000000002e-06,
timestamp='10/28/14 20:51:12.376714',
prefixed_func_name='g', caller_chain=['<module>'])
CallRecord(call_num=3, argnames=['a'], argvals=(2,), varargs=(),
explicit_kwargs=OrderedDict(),
defaulted_kwargs=OrderedDict(), implicit_kwargs={},
retval=None,
elapsed_secs=2.6509987947065383e-06,
process_secs=2.000000000002e-06,
timestamp='10/28/14 20:51:12.376977',
prefixed_func_name='g', caller_chain=['<module>'])
The first call (`call_num=1`) was discarded to make room for the last call
(`call_num=3`) because the call history size is set to 2.
You cannot change `max_history` using the mapping interface or the attribute
of the same name; attempts to do so raise `ValueError`:
>>> g.log_calls_settings.max_history = 17 # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
>>> g.log_calls_settings['max_history'] = 17 # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
The only way to change its value is with the [`clear_history`](#clear_history-method) method.
###The *stats.history_as_csv* attribute
The value of the `stats.history_as_csv` attribute is a text representation
of a decorated function's call history in CSV format. You can save this string
and import it into the program or tool of your choice for further analysis.
(*Note: if your tool of choice is [Pandas](http://pandas.pydata.org), you can use
the `stats` attribute [`stats.history_as_DataFrame`](#stats.history_as_DataFrame) to obtain history
directly in the representation you really want.*)
The CSV representation breaks out each argument into its own column,
throwing away information about whether an argument's value was passed or is a default.
>>> print(g.stats.history_as_csv) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
call_num|a|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
2|1|None|...|...|...|'g'|['<module>']
3|2|None|...|...|...|'g'|['<module>']
<BLANKLINE>
Ellipses above are for the `elapsed_secs`, `process_secs` and `timestamp` fields.
The CSV separator is '|' rather than ',' because some of the fields – `args`, `kwargs`
and `caller_chain` – use commas intrinsically. Let's examine one more `history_as_csv`
for a function that has all of those fields:
>>> @log_calls(record_history=True, log_call_numbers=True,
... log_exit=False, log_args=False)
... def f(a, *extra_args, x=1, **kw_args): pass
>>> def g(a, *args, **kwargs): f(a, *args, **kwargs)
>>> @log_calls(log_exit=False, log_args=False)
... def h(a, *args, **kwargs): g(a, *args, **kwargs)
>>> h(0)
h <== called by <module>
f [1] <== called by g <== h
>>> h(10, 17, 19, z=100)
h <== called by <module>
f [2] <== called by g <== h
>>> h(20, 3, 4, 6, x=5, y='Yarborough', z=100)
h <== called by <module>
f [3] <== called by g <== h
>>> print(f.stats.history_as_csv) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
call_num|a|extra_args|x|kw_args|retval|elapsed_secs|process_secs|timestamp|prefixed_fname|caller_chain
1|0|()|1|{}|None|...|...|...|'f'|['g', 'h']
2|10|(17, 19)|1|{'z': 100}|None|...|...|...|'f'|['g', 'h']
3|20|(3, 4, 6)|5|{'y': 'Yarborough', 'z': 100}|None|...|...|...|'f'|['g', 'h']
<BLANKLINE>
As usual, `log_calls` will use whatever names you use for *varargs* parameters
(here, `extra_args` and `kw_args`). Whatever the name of the `kwargs` parameter,
items within that field are guaranteed to be in sorted order (otherwise this
last example would sometimes fail as a doctest).
###[The *history_as_DataFrame* attribute](id:stats.history_as_DataFrame)
The `stats.history_as_DataFrame` attribute returns the history of a decorated
function as a [Pandas](http://pandas.pydata.org) [DataFrame](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe),
if the Pandas library is installed. This saves you the intermediate step of
calling `DataFrame.from_csv` with the proper arguments (and also saves you from
having to know or care what those are).
If Pandas is not installed, the value of this attribute is `None`.
The documentation for the `record_history` decorator contains an [example
of the `history_as_DataFrame` attribute](./record_history.html#stats.history_as_DataFrame)
which also illustrates its use in an IPython notebook.
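If you'd rather parse the CSV yourself, a minimal sketch (assuming Pandas is
installed, and relying on the '|' separator described above) is:
>>> import io, pandas as pd # doctest: +SKIP
>>> df = pd.read_csv(io.StringIO(f.stats.history_as_csv), sep='|') # doctest: +SKIP
>>> list(df.columns)[:3] # doctest: +SKIP
['call_num', 'a', 'extra_args']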
###[The *stats.clear_history(max_history=0)* method](id:clear_history-method)
As you might expect, the `stats.clear_history(max_history=0)` method clears
the call history of a decorated function. In addition, it resets all counters and accumulated times:
`num_calls_total` and `num_calls_logged` are reset to 0, and both
`elapsed_secs_logged` and `process_secs_logged` are reset to 0.0.
**It is the only way to change the value of the `max_history` setting**: pass
the new value via the method's optional keyword parameter, which accepts any
integer (default 0).
The function `f` has a nonempty history, as we just saw. Let's confirm the
values of all relevant settings and counters:
>>> f.log_calls_settings.max_history
0
>>> f.stats.num_calls_logged
3
>>> f.stats.num_calls_total
3
>>> f.stats.elapsed_secs_logged # doctest: +SKIP
1.3978995411889628e-05
>>> f.stats.process_secs_logged # doctest: +SKIP
1.2999999999985246e-05
Now let's clear `f`'s history, setting `max_history` to 33, and check that settings
and `stats` tallies are reset:
>>> f.stats.clear_history(max_history=33)
>>> f.log_calls_settings.max_history
33
>>> f.stats.num_calls_logged
0
>>> f.stats.num_calls_total
0
>>> f.stats.elapsed_secs_logged
0.0
>>> f.stats.process_secs_logged
0.0
### Data descriptors of *stats* are read-only
The data descriptor attributes of `stats` are all read-only:
>>> f.stats.num_calls_logged = 57 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError: ...
>>> f.stats.num_calls_total = 58 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError: ...
>>> f.stats.elapsed_secs_logged = 0.1 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError: ...
>>> f.stats.process_secs_logged = 0.1 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError: ...
>>> f.stats.history = tuple() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError: ...
>>> f.stats.history_as_csv = '' # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError: ...
>>> f.stats.history_as_DataFrame = '' # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError: ...
You **can** replace the non-data descriptor `clear_history`:
>>> f.stats.clear_history = lambda: None
but it's hard to imagine why one would :)
"""
pass
def main__realistic_logging_example():
"""
##[A realistic logging example – multiple handlers with different loglevels](id:logging-multiple-handlers)
#The basic setup:
#
# This shorter setup doesn't work on 3.4.0 on Linux (Ubuntu 12.04).
# Perhaps the new-in-3.3 handlers parameter to basicConfig
# regressed or anyway wasn't working properly.
# >>> import logging
# >>> import sys
# >>> ch = logging.StreamHandler(stream=sys.stdout)
# >>> logging.basicConfig(handlers=[ch])
# >>> logger = logging.getLogger('mylogger')
# >>> logger.setLevel(logging.DEBUG)
First let's set up a logger with a console handler that writes to `stdout`:
>>> import logging
>>> import sys
>>> ch = logging.StreamHandler(stream=sys.stdout)
>>> c_formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
>>> ch.setFormatter(c_formatter)
>>> logger = logging.getLogger('mylogger')
>>> logger.addHandler(ch)
>>> logger.setLevel(logging.DEBUG)
Now let's add another handler, also sent to `stdout` but best thought of as writing
to a log file. We'll set up the existing console handler with level `INFO`, and
the "file" handler with level `DEBUG` - a typical setup: you want to log all
details to the file, but you only want to write more important messages to
the console.
>>> fh = logging.StreamHandler(stream=sys.stdout)
>>> f_formatter = logging.Formatter('[FILE] %(levelname)8s:%(name)s: %(message)s')
>>> fh.setFormatter(f_formatter)
>>> fh.setLevel(logging.DEBUG)
>>> logger.addHandler(fh)
>>> ch.setLevel(logging.INFO)
Suppose we have two functions: one that's lower-level/often-called,
and another that's higher-level/infrequently called.
>>> @log_calls(logger=logger, loglevel=logging.DEBUG)
... def popular():
... pass
>>> @log_calls(logger=logger, loglevel=logging.INFO)
... def infrequent():
... popular()
Set logger level to `DEBUG` –
the console handler logs calls only for `infrequent`,
but the "file" handler logs calls for both functions.
>>> logger.setLevel(logging.DEBUG)
>>> infrequent() # doctest: +NORMALIZE_WHITESPACE
INFO:mylogger:infrequent <== called by <module>
[FILE] INFO:mylogger: infrequent <== called by <module>
[FILE] DEBUG:mylogger: popular <== called by infrequent
[FILE] DEBUG:mylogger: popular ==> returning to infrequent
INFO:mylogger:infrequent ==> returning to <module>
[FILE] INFO:mylogger: infrequent ==> returning to <module>
Now set logger level to `INFO` –
both handlers log calls only for `infrequent`:
>>> logger.setLevel(logging.INFO)
>>> infrequent() # doctest: +NORMALIZE_WHITESPACE
INFO:mylogger:infrequent <== called by <module>
[FILE] INFO:mylogger: infrequent <== called by <module>
INFO:mylogger:infrequent ==> returning to <module>
[FILE] INFO:mylogger: infrequent ==> returning to <module>
"""
pass
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# A_meta, a metaclass
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from collections import OrderedDict
separator = '\n' # default ', ' gives rather long lines
A_DBG_NONE = 0
A_DBG_BASIC = 1
A_DBG_INTERNAL = 2
@log_calls(args_sep=separator, enabled='A_debug=')
class A_meta(type):
@classmethod
@log_calls(log_retval=True)
def __prepare__(mcs, cls_name, bases, **kwargs):
super_dict = super().__prepare__(cls_name, bases, **kwargs)
A_debug = kwargs.pop('A_debug', A_DBG_NONE)
if A_debug >= A_DBG_INTERNAL:
log_calls.print(" mro =", mcs.__mro__)
log_calls.print(" dict from super() = %r" % super_dict)
super_dict = OrderedDict(super_dict)
super_dict['key-from-__prepare__'] = 1729
return super_dict
def __new__(mcs, cls_name, bases, cls_members: dict, **kwargs):
cls_members['key-from-__new__'] = "No, Hardy!"
A_debug = kwargs.pop('A_debug', A_DBG_NONE)
if A_debug >= A_DBG_INTERNAL:
log_calls.print(" calling super() with cls_members =", cls_members)
return super().__new__(mcs, cls_name, bases, cls_members, **kwargs)
def __init__(cls, cls_name, bases, cls_members: dict, **kwargs):
A_debug = kwargs.pop('A_debug', A_DBG_NONE)
if A_debug >= A_DBG_INTERNAL:
log_calls.print(" cls.__mro__:", cls.__mro__)
log_calls.print(" type(cls).__mro__[1] =", type(cls).__mro__[1])
try:
super().__init__(cls_name, bases, cls_members, **kwargs)
except TypeError as e:
# call type.__init__
if A_debug >= A_DBG_INTERNAL:
log_calls.print(" calling type.__init__ with no kwargs")
type.__init__(cls, cls_name, bases, cls_members)
def main__metaclass_example():
"""
##[A metaclass example](id:A-metaclass-example)
The class ``A_meta`` is a metaclass: it derives from ``type``,
and defines (overrides) methods ``__prepare__``, ``__new__`` and ``__init__``.
All of these `log_calls`-decorated methods write their messages using the indent-aware
method :ref:`log_calls.print <log_message_method>`.
All of ``A_meta``'s methods look for an implicit keyword parameter ``A_debug``,
used as the indirect value of the `log_calls` parameter ``enabled``.
The methods treat its value as an integer verbosity level: they write extra messages
when the value of ``A_debug`` is at least ``A_DBG_INTERNAL``.
Rather than make ``A_debug`` an explicit keyword parameter of the metaclass methods,
as in::
def __prepare__(mcs, cls_name, bases, *, A_debug=0, **kwargs):
instead we have left their signatures agnostic. If ``A_debug`` has been passed
by a class definition (as below), the methods use the passed value, and remove
``A_debug`` from ``kwargs``; otherwise they use a default value ``A_DBG_NONE``,
which is less than their threshold value for writing debug messages.
When we include ``A_debug=A_DBG_INTERNAL`` as a keyword argument to a class that
uses ``A_meta`` as its metaclass, that argument gets passed to all of
``A_meta``'s methods, so not only will calls to the metaclass methods be logged,
but those methods will also print extra debugging information:
>>> class A(metaclass=A_meta, A_debug=A_DBG_INTERNAL): # doctest: +NORMALIZE_WHITESPACE
... pass
A_meta.__prepare__ <== called by <module>
arguments:
mcs=<class '__main__.A_meta'>
cls_name='A'
bases=()
**kwargs={'A_debug': 2}
mro = (<class '__main__.A_meta'>, <class 'type'>, <class 'object'>)
dict from super() = {}
A_meta.__prepare__ return value: OrderedDict([('key-from-__prepare__', 1729)])
A_meta.__prepare__ ==> returning to <module>
A_meta.__new__ <== called by <module>
arguments:
mcs=<class '__main__.A_meta'>
cls_name='A'
bases=()
cls_members=OrderedDict([('key-from-__prepare__', 1729),
('__module__', '__main__'),
('__qualname__', 'A')])
**kwargs={'A_debug': 2}
calling super() with cls_members = OrderedDict([('key-from-__prepare__', 1729),
('__module__', '__main__'),
('__qualname__', 'A'),
('key-from-__new__', 'No, Hardy!')])
A_meta.__new__ ==> returning to <module>
A_meta.__init__ <== called by <module>
arguments:
cls=<class '__main__.A'>
cls_name='A'
bases=()
cls_members=OrderedDict([('key-from-__prepare__', 1729),
('__module__', '__main__'),
('__qualname__', 'A'),
('key-from-__new__', 'No, Hardy!')])
**kwargs={'A_debug': 2}
cls.__mro__: (<class '__main__.A'>, <class 'object'>)
type(cls).__mro__[1] = <class 'type'>
A_meta.__init__ ==> returning to <module>
If we had passed `A_debug=A_DBG_BASIC`, then only `log_calls` output would have
been printed: the metaclass methods would not have printed their extra debugging
statements.
If we pass `A_debug=0` (or omit it), we get no printed output at all, either from
`log_calls` or from `A_meta`'s methods:
>>> class AA(metaclass=A_meta, A_debug=False): # no output
... pass
>>> class AAA(metaclass=A_meta): # no output
... pass
"""
# SURGERY:
main__metaclass_example.__doc__ = \
main__metaclass_example.__doc__.replace("__main__", __name__)
def main__functions_and_methods_accessing_their_attrs():
"""
## [Functions and methods accessing their own *log_calls* attributes](id:accessing-own-attrs)
At times you may want a function or method to access the attributes
added for it by `log_calls`. We've seen examples of this, where
[global functions](#log_message) and [methods](#A-metaclass-example) use the indent-aware method `log_message`
to write debugging messages that align properly with those of `log_calls`.
In the metaclass example, two of the methods – an instance method, and
a classmethod – had to perform extra legerdemain in order to get at their
attributes. Happily, those are the only special cases.
This section surveys all the different cases of functions and methods
accessing their `log_calls` attributes.
NOTE: The most artificial aspect of the examples in this section
is that the functions and methods all access their `stats` attribute.
This might be called "excessive introspection", and is probably seldom
useful: when a log_calls-decorated function executes, its call counters
(`stats.num_calls_logged` and `stats.num_calls_total`) have been incremented,
but, as it hasn't yet returned, the value of `stats.elapsed_secs_logged`
(as well as its history) remains as it was before the call began.
We confirm and test this claim in the global and inner functions examples
below.
### [Global functions and inner functions accessing their attributes](id:global-and-inner-functions-accessing-attrs)
Global functions and inner functions can access within their own bodies
the attributes that `log_calls` adds for them (`log_calls_settings`, `stats`, `log_message()`)
using the same syntax that works outside of their bodies.
####[Global function accessing its attributes](id:global-function-accessing-attrs)
A global function can just use the usual syntax:
>>> @log_calls(enabled=2)
... def f():
... f.log_message("f.log_calls_settings.enabled =", f.log_calls_settings.enabled,
... prefix_with_name=True)
... f.log_message("This is call number", f.stats.num_calls_logged)
... f.log_message("f.stats.elapsed_secs_logged is still", f.stats.elapsed_secs_logged)
>>> f()
f <== called by <module>
f: f.log_calls_settings.enabled = 2
This is call number 1
f.stats.elapsed_secs_logged is still 0.0
f ==> returning to <module>
#### [Inner function accessing its attributes](id:inner-function-accessing-attrs)
Similarly, an inner function can just do the usual thing:
>>> @log_calls()
... def outer(x):
... @log_calls(name='%s', enabled=7)
... def inner(y):
... inner.log_message("inner.log_calls_settings.enabled =", inner.log_calls_settings.enabled)
... inner.log_message("call number", inner.stats.num_calls_logged, prefix_with_name=True)
... inner.log_message("elapsed_secs_logged =", inner.stats.elapsed_secs_logged, prefix_with_name=True)
... return x + y
... outer.log_message("inner enabled =", inner.log_calls_settings.enabled, prefix_with_name=True)
... outer.log_message("Before call to inner:", extra_indent_level=-1, prefix_with_name=True)
... outer.log_message("its call number (inner.stats.num_calls_logged) =", inner.stats.num_calls_logged)
... outer.log_message("its elapsed_secs_logged =", inner.stats.elapsed_secs_logged)
... inner(2 * x)
... outer.log_message("After call to inner:", extra_indent_level=-1, prefix_with_name=True)
... outer.log_message("its call number =", inner.stats.num_calls_logged)
... outer.log_message("its elapsed_secs_logged =", inner.stats.elapsed_secs_logged)
We specified `name='%s'` in the decorator of `inner` so that the function's
display name will be just `inner` and not `outer.<locals>.inner`:
>>> outer(3) # doctest: +ELLIPSIS
outer <== called by <module>
arguments: x=3
outer: inner enabled = 7
outer: Before call to inner:
its call number (inner.stats.num_calls_logged) = 0
its elapsed_secs_logged = 0.0
inner <== called by outer
arguments: y=6
inner.log_calls_settings.enabled = 7
inner: call number 1
inner: elapsed_secs_logged = 0.0
inner ==> returning to outer
outer: After call to inner:
its call number = 1
its elapsed_secs_logged = ...
outer ==> returning to <module>
### [Methods accessing their attributes](id:methods-accessing-attrs)
Static methods can access their log_calls-added attributes in a straightforward
way. However, the other kinds of methods – class methods and instance methods –
are different: each requires a unique kind of subterfuge to access its `log_calls`
wrapper and thereby its `log_calls` attributes.
Here's a class exhibiting the full range of possibilities:
>>> class X():
... # Instance methods, including __init__, can obtain their wrappers
... # from their class, via self.__class__.__dict__[method_name]
... @log_calls()
... def __init__(self):
... wrapper = X.__dict__['__init__'] # X not self
... logging_fn = wrapper.log_message
... logging_fn(wrapper.log_calls_settings.enabled)
... logging_fn(wrapper.stats.num_calls_logged)
...
... @log_calls(enabled=2)
... def my_method(self):
... wrapper = X.__dict__['my_method'] # X not self
... logging_fn = wrapper.log_message
... logging_fn(wrapper.log_calls_settings.enabled)
... logging_fn(wrapper.stats.num_calls_logged)
...
... # A classmethod can get at its attributes from its own body,
... # via cls.<classmethod>.__func__
... @classmethod
... @log_calls(enabled=12)
... def my_classmethod(cls):
... logging_fn = cls.my_classmethod.__func__.log_message
... logging_fn(cls.my_classmethod.__func__.log_calls_settings.enabled)
... logging_fn(cls.my_classmethod.__func__.stats.num_calls_logged)
...
... # A staticmethod can access its attributes from its own body
... # in the obvious way, via <class>.<staticmethod>
... @staticmethod
... @log_calls(enabled=22)
... def my_staticmethod():
... logging_fn = X.my_staticmethod.log_message
... logging_fn(X.my_staticmethod.log_calls_settings.enabled)
... logging_fn(X.my_staticmethod.stats.num_calls_logged)
#### [Instance method tests](id:instance-method-accessing-attrs)
>>> x = X() # doctest: +ELLIPSIS
X.__init__ <== called by <module>
arguments: self=<__main__.X object at ...>
True
1
X.__init__ ==> returning to <module>
>>> x.my_method() # doctest: +ELLIPSIS
X.my_method <== called by <module>
arguments: self=<__main__.X object at ...>
2
1
X.my_method ==> returning to <module>
#### [Class method test](id:class-method-accessing-attrs)
>>> x.my_classmethod() # or X.my_classmethod()
X.my_classmethod <== called by <module>
arguments: cls=<class '__main__.X'>
12
1
X.my_classmethod ==> returning to <module>
#### [Static method test](id:static-method-accessing-attrs)
>>> x.my_staticmethod() # or X.my_staticmethod()
X.my_staticmethod <== called by <module>
22
1
X.my_staticmethod ==> returning to <module>
"""
pass
# SURGERY:
main__functions_and_methods_accessing_their_attrs.__doc__ = \
main__functions_and_methods_accessing_their_attrs.__doc__.replace("__main__", __name__)
##############################################################################
# end of tests.
##############################################################################
# For unittest integration
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite())
return tests
if __name__ == "__main__":
doctest.testmod() # (verbose=True)
# unittest.main()
| mit |
JPFrancoia/scikit-learn | sklearn/utils/validation.py | 20 | 26027 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as _DataConversionWarning
from ..exceptions import NonBLASDotWarning as _NonBLASDotWarning
from ..exceptions import NotFittedError as _NotFittedError
@deprecated("DataConversionWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class DataConversionWarning(_DataConversionWarning):
pass
@deprecated("NonBLASDotWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class NonBLASDotWarning(_NonBLASDotWarning):
pass
@deprecated("NotFittedError has been moved into the sklearn.exceptions module."
" It will not be available here from version 0.19")
class NotFittedError(_NotFittedError):
pass
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', _NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
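Examples
--------
Illustrative: per the conversion rule above, int32 input becomes float32.
>>> import numpy as np
>>> as_float_array(np.array([1, 2], dtype=np.int32)).dtype == np.float32
True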
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
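Examples
--------
Illustrative: equal lengths pass silently; mismatched lengths raise.
>>> check_consistent_length([1, 2, 3], ['a', 'b', 'c'])
>>> check_consistent_length([1, 2], [1, 2, 3]) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Found input variables with inconsistent numbers of samples: ...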
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
uniques = np.unique(lengths)
if len(uniques) > 1:
raise ValueError("Found input variables with inconsistent numbers of"
" samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
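Examples
--------
Illustrative: a nested list is converted to a 2D numpy array.
>>> check_array([[1, 2], [3, 4]]).shape
(2, 2)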
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, _DataConversionWarning)
return array
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
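Examples
--------
Illustrative: X is made 2D and y is raveled to 1D.
>>> X_c, y_c = check_X_y([[1], [2], [3]], [0, 1, 0])
>>> X_c.shape, y_c.shape
((3, 1), (3,))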
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
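Examples
--------
Illustrative: a column vector is raveled.
>>> import numpy as np
>>> column_or_1d(np.array([[1], [2], [3]]))
array([1, 2, 3])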
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
_DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
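Examples
--------
Illustrative: an int seed yields a fresh RandomState, while an existing
RandomState instance is returned unchanged.
>>> import numpy as np
>>> rng = check_random_state(42)
>>> isinstance(rng, np.random.RandomState)
True
>>> check_random_state(rng) is rng
True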
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
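Examples
--------
Illustrative: a non-symmetric array is replaced by the average of itself
and its transpose (warning suppressed for brevity).
>>> import numpy as np
>>> a = np.array([[0., 2.], [0., 0.]])
>>> s = check_symmetric(a, raise_warning=False)
>>> bool(np.allclose(s, s.T))
True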
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
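Examples
--------
Illustrative: an estimator that has not been fitted raises NotFittedError.
>>> from sklearn.svm import SVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
...     check_is_fitted(SVC(), "support_")
... except NotFittedError:
...     print("not fitted")
not fitted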
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
# FIXME NotFittedError_ --> NotFittedError in 0.19
raise _NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
shubhomoydas/pyaad | pyalad/forest_aad_support.py | 1 | 20227 | import os
import numpy as np
import matplotlib.pyplot as plt
import logging
from pandas import DataFrame
from app_globals import *
from alad_support import *
from r_support import matrix, cbind
from forest_aad_detector import *
from data_plotter import *
from results_support import *
from gp_support import *
def get_queried_indexes(scores, labels, opts):
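# Rank all instances by decreasing anomaly score, take the top opts.budget
# as queries, and return the cumulative count of true anomalies among the
# queried instances along with the queried indexes.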
# logger.debug("computing queried indexes...")
queried = np.argsort(-scores)[0:opts.budget]
num_seen = np.cumsum(labels[queried[np.arange(opts.budget)]])
return num_seen, queried
def write_baseline_query_indexes(queried_info, opts):
logger.debug("writing baseline queries...")
queried = np.zeros(shape=(len(queried_info), opts.budget + 2), dtype=int)
num_seen = np.zeros(shape=(len(queried_info), opts.budget + 2), dtype=int)
for i, info in enumerate(queried_info):
num_seen[i, 2:(opts.budget + 2)] = info[0]
num_seen[i, 0] = 1
queried[i, 2:(opts.budget + 2)] = info[1] + 1  # make indexes 1-based, *not* 0-based
queried[i, 0] = 1
prefix = opts.get_alad_metrics_name_prefix()
baseline_file = os.path.join(opts.resultsdir, "%s-baseline.csv" % (prefix,))
# np.savetxt(baseline_file, num_seen, fmt='%d', delimiter=',')
queried_idxs_baseline_file = os.path.join(opts.resultsdir, "%s-queried-baseline.csv" % (prefix,))
np.savetxt(queried_idxs_baseline_file, queried, fmt='%d', delimiter=',')
def forest_aad_unit_tests_battery(X_train, labels, model, metrics, opts,
outputdir, dataset_name=""):
data_2D = X_train.shape[1] == 2
regcols = ["red", "blue", "green", "brown", "cyan", "pink", "orange", "magenta", "yellow", "violet"]
xx = None; yy = None
if data_2D:
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-4, 8, 50), np.linspace(-4, 8, 50))
# sidebar coordinates and dimensions for showing rank locations of true anomalies
dash_xy = (-4.0, -2.0) # bottom-left (x,y) coordinates
dash_wh = (0.4, 8) # width, height
output_forest_original = False
output_transformed_to_file = False
test_loss_grad = False
plot_dataset = data_2D and True
plot_rectangular_regions = plot_dataset and True
plot_forest_contours = data_2D and True
plot_baseline = data_2D and False
plot_aad = metrics is not None and data_2D and True
pdfpath_baseline = "%s/tree_baseline.pdf" % outputdir
pdfpath_orig_if_contours = "%s/score_contours.pdf" % outputdir
logger.debug("Number of regions: %d" % len(model.d))
tm = Timer()
X_train_new = model.transform_to_region_features(X_train, dense=False)
logger.debug(tm.message("transformed input to region features"))
if plot_dataset:
tm.start()
plot_dataset_2D(X_train, labels, model, plot_rectangular_regions, regcols, outputdir)
logger.debug(tm.message("plotted dataset"))
if output_forest_original:
n_found = evaluate_forest_original(X_train, labels, opts.budget, model, x_new=X_train_new)
np.savetxt(os.path.join(outputdir, "iforest_original_num_found_%s.csv" % dataset_name),
n_found, fmt='%3.2f', delimiter=",")
if plot_forest_contours:
tm.start()
plot_forest_contours_2D(X_train, labels, xx, yy, opts.budget, model,
pdfpath_orig_if_contours, dash_xy, dash_wh)
logger.debug(tm.message("plotted contours"))
if output_transformed_to_file:
write_sparsemat_to_file(os.path.join(outputdir, "forest_features.csv"),
X_train_new, fmt='%3.2f', delimiter=",")
x_tmp = np.vstack((model.d, model.node_samples, model.frac_insts))
write_sparsemat_to_file(os.path.join(outputdir, "forest_node_info.csv"),
x_tmp.T, fmt='%3.2f', delimiter=",")
if test_loss_grad:
test_forest_loss_grad(X_train_new, labels, model, opts)
if plot_baseline:
plot_forest_baseline_contours_2D(X_train, labels, X_train_new, xx, yy, opts.budget, model,
pdfpath_baseline, dash_xy, dash_wh)
if plot_aad and metrics is not None:
plot_aad_2D(X_train, labels, X_train_new, xx, yy, model,
metrics, outputdir, dash_xy, dash_wh)
if False:
plot_aad_gp(X_train, labels, X_train_new, xx, yy, model,
metrics, outputdir, dash_xy, dash_wh)
if False:
plot_aad_score_var(X_train, labels, X_train_new, xx, yy, model,
metrics, outputdir, dash_xy, dash_wh)
def plot_aad_2D(x, y, x_forest, xx, yy, forest, metrics,
outputdir, dash_xy, dash_wh):
# use this to plot the AAD feedback
x_test = np.c_[xx.ravel(), yy.ravel()]
x_if = forest.transform_to_region_features(x_test, dense=False)
queried = np.array(metrics.queried)
for i, q in enumerate(queried):
pdfpath = "%s/iter_%02d.pdf" % (outputdir, i)
dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)
pl = dp.get_next_plot()
w = metrics.all_weights[i, :]
Z = forest.get_score(x_if, w)
Z = Z.reshape(xx.shape)
pl.contourf(xx, yy, Z, 20, cmap=plt.cm.get_cmap('jet'))
dp.plot_points(x, pl, labels=y, lbl_color_map={0: "grey", 1: "red"}, s=25)
# print queried[np.arange(i+1)]
# print X_train[queried[np.arange(i+1)], :]
dp.plot_points(matrix(x[queried[np.arange(i+1)], :], nrow=i+1),
pl, labels=y[queried[np.arange(i+1)]], defaultcol="red",
lbl_color_map={0: "green", 1: "red"}, edgecolor=None, facecolors=True,
marker=matplotlib.markers.MarkerStyle('o', fillstyle=None), s=35)
# plot the sidebar
anom_scores = forest.get_score(x_forest, w)
anom_order = np.argsort(-anom_scores)
anom_idxs = np.where(y[anom_order] == 1)[0]
dash = 1 - (anom_idxs * 1.0 / x.shape[0])
plot_sidebar(dash, dash_xy, dash_wh, pl)
dp.close()
def plot_aad_score_var(x, y, x_forest, xx, yy, forest, metrics,
outputdir, dash_xy, dash_wh):
# use this to plot the AAD feedback
x_test = np.c_[xx.ravel(), yy.ravel()]
x_test_forest = forest.transform_to_region_features(x_test, dense=False)
queried = np.array(metrics.queried)
for i, q in enumerate(queried):
pdfpath = "%s/score_iter_%02d.pdf" % (outputdir, i)
dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)
pl = dp.get_next_plot()
w = metrics.all_weights[i, :]
s_train = forest.get_score(x_forest, w)
ranked_indexes = np.argsort(-s_train, )
# s_test = forest.get_score(x_test_forest, w)
test_indexes = metrics.test_indexes[i]
score_eval_set = x_test_forest
score_mean, score_var, test_indexes, v_eval, _ = \
get_score_variances(x=x_forest, w=w,
n_test=len(test_indexes) if test_indexes is not None else 10,
ordered_indexes=ranked_indexes,
queried_indexes=queried,
test_indexes=test_indexes,
eval_set=score_eval_set,
n_closest=9)
qpos = np.argmax(score_var)
q = test_indexes[qpos]
logger.debug("score_var:\n%s\ntest_indexes:\n%s" %
(str(list(score_var)), str(list(test_indexes))))
logger.debug("qpos: %d, query instance: %d, var: %f, queried:%s" %
(qpos, q, score_var[qpos], str(list(queried[np.arange(i)]))))
if score_eval_set is not None:
Z = v_eval.reshape(xx.shape)
levels = np.linspace(np.min(v_eval), np.max(v_eval), 20)
CS = pl.contourf(xx, yy, Z, levels, cmap=plt.cm.get_cmap('jet'))
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('score variance')
dp.plot_points(x, pl, labels=y, lbl_color_map={0: "grey", 1: "red"}, s=25)
dp.plot_points(x[test_indexes, :], pl, marker='o', defaultcol='magenta',
s=60, edgecolor='magenta', facecolors='none')
dp.plot_points(matrix(x[queried[np.arange(i+1)], :], nrow=i+1),
pl, labels=y[queried[np.arange(i+1)]], defaultcol="red",
lbl_color_map={0: "green", 1: "red"}, edgecolor=None, facecolors=True,
marker=matplotlib.markers.MarkerStyle('o', fillstyle=None), s=35)
# plot the sidebar
anom_scores = forest.get_score(x_forest, w)
anom_order = np.argsort(-anom_scores)
anom_idxs = np.where(y[anom_order] == 1)[0]
dash = 1 - (anom_idxs * 1.0 / x.shape[0])
plot_sidebar(dash, dash_xy, dash_wh, pl)
dp.close()
def plot_aad_gp(x, y, x_forest, xx, yy, forest, metrics,
outputdir, dash_xy, dash_wh):
# use this to plot the AAD feedback
x_test = np.c_[xx.ravel(), yy.ravel()]
x_test_forest = forest.transform_to_region_features(x_test, dense=False)
queried = np.array(metrics.queried)
for i, q in enumerate(queried):
pdfpath = "%s/gp_iter_%02d.pdf" % (outputdir, i)
dp = DataPlotter(pdfpath=pdfpath, rows=1, cols=1)
pl = dp.get_next_plot()
w = metrics.all_weights[i, :]
s_train = forest.get_score(x_forest, w)
ranked_indexes = np.argsort(-s_train, )
# s_test = forest.get_score(x_test_forest, w)
gp_eval_set = x_test_forest
gp_score, gp_var, train_indexes, test_indexes, v_eval = \
get_gp_predictions(x=x_forest, y=s_train,
orig_x=x,
ordered_indexes=ranked_indexes,
queried_indexes=queried,
n_train=100, n_test=30, length_scale=40,
eval_set=gp_eval_set, orig_eval_set=x_test,
n_closest=9)
logger.debug("gp_var:\n%s\ntest_indexes:\n%s" % (str(list(gp_var)), str(list(test_indexes))))
if gp_eval_set is not None:
Z = v_eval.reshape(xx.shape)
levels = np.linspace(0., 1., 20)
CS = pl.contourf(xx, yy, Z, levels, cmap=plt.cm.get_cmap('jet'))
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('score variance')
dp.plot_points(x, pl, labels=y, lbl_color_map={0: "grey", 1: "red"}, s=25)
dp.plot_points(x[train_indexes, :], pl, marker='o', defaultcol='blue',
s=35, edgecolor='blue', facecolors='none')
dp.plot_points(x[test_indexes, :], pl, marker='o', defaultcol='magenta',
s=60, edgecolor='magenta', facecolors='none')
# print queried[np.arange(i+1)]
# print X_train[queried[np.arange(i+1)], :]
dp.plot_points(matrix(x[queried[np.arange(i+1)], :], nrow=i+1),
pl, labels=y[queried[np.arange(i+1)]], defaultcol="red",
lbl_color_map={0: "green", 1: "red"}, edgecolor="black",
marker=matplotlib.markers.MarkerStyle('o', fillstyle=None), s=35)
# plot the sidebar
anom_scores = forest.get_score(x_forest, w)
anom_order = np.argsort(-anom_scores)
anom_idxs = np.where(y[anom_order] == 1)[0]
dash = 1 - (anom_idxs * 1.0 / x.shape[0])
plot_sidebar(dash, dash_xy, dash_wh, pl)
dp.close()
def test_forest_loss_grad(x_forest, y, model, opts):
n = x_forest.shape[0]
bt = get_budget_topK(n, opts)
w = np.ones(len(model.d), dtype=float)
w = w / w.dot(w) # normalized uniform weights
qval = model.get_aatp_quantile(x_forest, w, bt.topK)
w_unifprior = np.ones(len(model.d), dtype=float)
w_unifprior = w_unifprior / w_unifprior.dot(w_unifprior)
print "topK=%d, budget=%d, qval=%8.5f" % (bt.topK, bt.budget, qval)
theta = np.zeros(w.shape, dtype=float)
loss = model.if_aad_loss_linear(w, x_forest[1:10, :], y[1:10], qval,
Ca=opts.Ca, Cn=opts.Cn,
withprior=opts.withprior, w_prior=w_unifprior,
sigma2=opts.priorsigma2)
print "loss: %f" % loss
loss_grad = model.if_aad_loss_gradient_linear(w, x_forest[1:10, :], y[1:10], qval,
Ca=opts.Ca, Cn=opts.Cn,
withprior=opts.withprior, w_prior=w_unifprior,
sigma2=opts.priorsigma2)
logger.debug("loss_grad")
logger.debug(loss_grad)
def evaluate_forest_original(x, y, budget, forest, x_new=None):
original_scores = 0.5 - forest.decision_function(x)
queried = np.argsort(-original_scores)
n_found_orig = np.cumsum(y[queried[np.arange(budget)]])
# logger.debug("original isolation forest:")
# logger.debug(n_found_orig)
if x_new is not None:
w = np.ones(len(forest.d), dtype=float)
w = w / w.dot(w) # normalized uniform weights
agg_scores = forest.get_score(x_new, w)
queried = np.argsort(-agg_scores)
n_found_baseline = np.cumsum(y[queried[np.arange(budget)]])
n_found = np.vstack((n_found_baseline, n_found_orig)).T
else:
n_found = np.reshape(n_found_orig, (1, len(n_found_orig)))
return n_found
def plot_forest_baseline_contours_2D(x, y, x_forest, xx, yy, budget, forest,
pdfpath_contours, dash_xy, dash_wh):
# use this to plot baseline query points.
w = np.ones(len(forest.d), dtype=float)
w = w / w.dot(w) # normalized uniform weights
baseline_scores = forest.get_score(x_forest, w)
queried = np.argsort(-baseline_scores)
n_found = np.cumsum(y[queried[np.arange(budget)]])
print n_found
dp = DataPlotter(pdfpath=pdfpath_contours, rows=1, cols=1)
pl = dp.get_next_plot()
x_test = np.c_[xx.ravel(), yy.ravel()]
x_if = forest.transform_to_region_features(x_test, dense=False)
y_if = forest.get_score(x_if, w)
Z = y_if.reshape(xx.shape)
pl.contourf(xx, yy, Z, 20, cmap=plt.cm.get_cmap('jet'))
dp.plot_points(x, pl, labels=y, lbl_color_map={0: "grey", 1: "red"}, s=25)
# print queried[np.arange(i+1)]
# print X_train[queried[np.arange(i+1)], :]
dp.plot_points(matrix(x[queried[np.arange(budget)], :], nrow=budget),
pl, labels=y[queried[np.arange(budget)]], defaultcol="red",
lbl_color_map={0: "green", 1: "red"}, edgecolor="black",
marker=matplotlib.markers.MarkerStyle('o', fillstyle=None), s=35)
# plot the sidebar
anom_idxs = np.where(y[queried] == 1)[0]
dash = 1 - (anom_idxs * 1.0 / x.shape[0])
plot_sidebar(dash, dash_xy, dash_wh, pl)
dp.close()
def plot_forest_contours_2D(x, y, xx, yy, budget, forest, pdfpath_contours, dash_xy, dash_wh):
# Original detector contours
baseline_scores = 0.5 - forest.decision_function(x)
queried = np.argsort(-baseline_scores)
# logger.debug("baseline scores:%s\n%s" % (str(baseline_scores.shape), str(list(baseline_scores))))
n_found = np.cumsum(y[queried[np.arange(budget)]])
print n_found
Z_if = 0.5 - forest.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z_if = Z_if.reshape(xx.shape)
dp = DataPlotter(pdfpath=pdfpath_contours, rows=1, cols=1)
pl = dp.get_next_plot()
pl.contourf(xx, yy, Z_if, 20, cmap=plt.cm.get_cmap('jet'))
dp.plot_points(x, pl, labels=y, lbl_color_map={0: "grey", 1: "red"})
dp.plot_points(matrix(x[queried[np.arange(budget)], :], nrow=budget),
pl, labels=y[queried[np.arange(budget)]], defaultcol="red",
lbl_color_map={0: "green", 1: "red"}, edgecolor="black",
marker=matplotlib.markers.MarkerStyle('o', fillstyle=None), s=35)
# plot the sidebar
anom_idxs = np.where(y[queried] == 1)[0]
dash = 1 - (anom_idxs * 1.0 / x.shape[0])
plot_sidebar(dash, dash_xy, dash_wh, pl)
dp.close()
def plot_dataset_2D(x, y, forest, plot_regions, regcols, pdf_folder):
# use this to plot the dataset
treesig = "_%d_trees" % forest.n_estimators if plot_regions else ""
pdfpath_dataset = "%s/synth_dataset%s.pdf" % (pdf_folder, treesig)
dp = DataPlotter(pdfpath=pdfpath_dataset, rows=1, cols=1)
pl = dp.get_next_plot()
# dp.plot_points(x, pl, labels=y, lbl_color_map={0: "grey", 1: "red"})
dp.plot_points(x[y==0, :], pl, labels=y[y==0], defaultcol="grey")
dp.plot_points(x[y==1, :], pl, labels=y[y==1], defaultcol="red", s=26, linewidths=1.5)
if plot_regions:
# plot the isolation forest tree regions
axis_lims = (plt.xlim(), plt.ylim())
for i, regions in enumerate(forest.regions_in_forest):
for region in regions:
region = region.region
plot_rect_region(pl, region, regcols[i % len(regcols)], axis_lims)
dp.close()
def prepare_forest_aad_debug_args():
datasets = ["abalone", "ann_thyroid_1v3", "cardiotocography_1", "covtype_sub",
"kddcup_sub", "mammography_sub", "shuttle_sub", "yeast", "toy", "toy2"]
dataset = datasets[9]
datapath = "./datasets/anomaly/%s/fullsamples/%s_1.csv" % (dataset, dataset)
outputdir = "./temp"
budget = 35
n_runs = 2
inference_type = AAD_RSFOREST
# inference_type = AAD_HSTREES
# inference_type = AAD_IFOREST
sigma2 = 0.5
n_jobs = 4
add_leaves_only = False
plot2D = True
streaming = True
stream_window = 64
allow_stream_update = True
if inference_type == AAD_IFOREST:
n_trees = 100
forest_max_depth = 100
score_type = IFOR_SCORE_TYPE_CONST
ensemble_score = ENSEMBLE_SCORE_LINEAR
Ca = 100.
Cx = 0.001
elif inference_type == AAD_HSTREES:
n_trees = 25
forest_max_depth = 7
score_type = HST_SCORE_TYPE
ensemble_score = ENSEMBLE_SCORE_LINEAR
Ca = 1.
Cx = 0.001
elif inference_type == AAD_RSFOREST:
n_trees = 30
forest_max_depth = 7
score_type = RSF_LOG_SCORE_TYPE
# score_type = RSF_SCORE_TYPE
# score_type = ORIG_TREE_SCORE_TYPE
ensemble_score = ENSEMBLE_SCORE_LINEAR
Ca = 1.
Cx = 0.001
else:
raise ValueError("Invalid inference type %s" % inference_type)
args = get_forest_aad_args(dataset=dataset, n_trees=n_trees,
detector_type=inference_type,
forest_add_leaf_nodes_only=add_leaves_only,
forest_score_type=score_type,
forest_max_depth=forest_max_depth,
ensemble_score=ensemble_score,
sigma2=sigma2, Ca=Ca, Cx=Cx,
budget=budget, reruns=n_runs, n_jobs=n_jobs,
log_file="./temp/aad.log", plot2D=plot2D,
streaming=streaming, stream_window=stream_window,
allow_stream_update=allow_stream_update)
args.datafile = datapath
args.resultsdir = os.path.join(outputdir, args.dataset,
"%s_trees%d_samples%d_q%d_bd%d_nscore%d%s_tau%1.2f_sig%4.3f_ca%1.0f_cx%4.3f_%s%s%s" %
(detector_types[args.detector_type], args.ifor_n_trees, args.ifor_n_samples,
args.querytype, args.budget, args.forest_score_type,
"" if not args.forest_add_leaf_nodes_only else "_leaf",
args.tau,
args.sigma2, args.Ca, args.Cx, ensemble_score_names[ensemble_score],
"" if args.detector_type == AAD_IFOREST else "_d%d" % args.forest_max_depth,
"_stream" if streaming else ""))
dir_create(args.resultsdir)
return args | mit |
apdavison/IzhikevichModel | PyNN/izhikevich2004.py | 1 | 15657 | """
This script reproduces Fig. 1 of Izhikevich (2004).
Original implementation references:
Izhikevich E.M. (2004) Which Model to Use for Cortical Spiking Neurons?
IEEE Transactions on Neural Networks, 15:1063-1070 (special issue on temporal coding)
Izhikevich E.M. (2003) Simple Model of Spiking Neurons.
IEEE Transactions on Neural Networks, 14:1569- 1572
http://www.izhikevich.org/publications/whichmod.htm
http://izhikevich.org/publications/figure1.m
See http://www.opensourcebrain.org/projects/izhikevichmodel/wiki for info on issues with the current implementation.
Usage: python izhikevich2004.py <simulator>
where <simulator> is neuron, nest, brian, or another PyNN backend simulator
Requirements: PyNN 0.8 and one or more PyNN-supported simulators
Units: all times are in milliseconds, voltages in millivolts and currents in nanoamps
Version 0.1 - original script written by Vitor Chaud during Google Summer of Code 2013
Version 0.2 - script condensed and updated to use latest development version of PyNN by Andrew Davison, February and September 2014
:copyright: Copyright 2013-2014 Vitor Chaud, Andrew Davison and Padraig Gleeson
:license: Modified BSD, see LICENSE for details.
"""
from __future__ import division
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from pyNN.utility import get_simulator, normalized_filename
global_time_step = 0.01
plt.rcParams.update({
'lines.linewidth': 0.5,
'legend.fontsize': 'small',
'axes.titlesize': 'small',
'font.size': 6,
'savefig.dpi': 200,
})
def run_simulation(time_step=global_time_step, a=0.02, b=0.2, c=-65.0, d=6.0,
u_init=None, v_init=-70.0, waveform=None, t_stop=100.0,
title="", scalebar_level=0, label_scalebar=False):
"""
Run a simulation of a single neuron.
Arguments:
time_step - time step used in solving the differential equations
a - time scale of the recovery variable u
b - sensitivity of u to the subthreshold fluctuations of the membrane potential v
c - after-spike reset value of v
d - after-spike reset of u
u_init - initial value of u
v_init - initial value of v
waveform - a tuple of two NumPy arrays, containing time and amplitude data for the injected current
t_stop - duration of the simulation
title - a title to be added to the figure panel for this simulation
scalebar_level - a value between 0 and 1, controlling the vertical placement of the scalebar
label_scalebar - True or False, whether to add a label to the scalebar
"""
global j, fig, gs
# create a neuron and current source
sim.setup(timestep=time_step, min_delay=time_step)
if u_init is None:
u_init = b * v_init
initialValues = {'u': u_init, 'v': v_init}
cell_type = sim.Izhikevich(a=a, b=b, c=c, d=d, i_offset=0.0)
neuron = sim.create(cell_type)
neuron.initialize(**initialValues)
neuron.record('v')
times, amps = waveform
injectedCurrent = sim.StepCurrentSource(times=times, amplitudes=amps)
injectedCurrent.inject_into(neuron)
# run the simulation and retrieve the recorded data
sim.run(t_stop)
data = neuron.get_data().segments[0]
# plot the membrane potential and injected current
gs1 = gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=gs[j//4, j%4],
height_ratios=[8, 1],
hspace=0.0)
ax1 = plt.subplot(gs1[0])
ax2 = plt.subplot(gs1[1])
j += 1
for ax in (ax1, ax2):
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.spines['left'].set_color('None')
ax.spines['right'].set_color('None')
ax.spines['bottom'].set_color('None')
ax.spines['top'].set_color('None')
ax.set_xlim(0.0, t_stop)
ax1.set_title(title)
vm = data.filter(name='v')[0]
i_times, i_vars = stepify(times, amps)
ax1.plot(vm.times, vm)
ax1.set_ylim(-90, 30)
ax2.plot(i_times, i_vars, 'g')
ymin, ymax = amps.min(), amps.max()
padding = (ymax - ymin)/10
ax2.set_ylim(ymin - padding, ymax + padding)
# scale bar
scalebar_y = ymin + (ymax - ymin) * scalebar_level
ax2.plot([t_stop - 20, t_stop], [scalebar_y, scalebar_y],
color='k', linestyle='-', linewidth=1)
if label_scalebar:
ax.text(t_stop, ymin + padding, "20 ms", fontsize=4, horizontalalignment='right')
plt.show(block=False)
fig.canvas.draw()
def step(amplitude, t_stop):
"""
Generate the waveform for a current that starts at zero and is stepped up
to the given amplitude at time t_stop/10.
"""
times = np.array([0, t_stop/10, t_stop])
amps = np.array([0, amplitude, amplitude])
return times, amps
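# For example, step(0.014, 100.0) (the tonic-spiking stimulus used below) returns
# times = [0, 10, 100] and amps = [0, 0.014, 0.014].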
def pulse(amplitude, onsets, width, t_stop, baseline=0.0):
"""
Generate the waveform for a series of current pulses.
Arguments:
amplitude - absolute current value during each pulse
onsets - a list or array of times at which pulses begin
width - duration of each pulse
t_stop - total duration of the waveform
baseline - the current value before, between and after pulses.
"""
times = [0]
amps = [baseline]
for onset in onsets:
times += [onset, onset + width]
amps += [amplitude, baseline]
times += [t_stop]
amps += [baseline]
return np.array(times), np.array(amps)
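# For example, pulse(0.02, [9], 2, 50.0) (the DAP stimulus used below) returns
# times = [0, 9, 11, 50] and amps = [0, 0.02, 0, 0].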
def ramp(gradient, onset, t_stop, baseline=0.0, time_step=global_time_step, t_start=0.0):
"""
Generate the waveform for a current which is initially constant
and then increases linearly with time.
Arguments:
gradient - gradient of the ramp
onset - time at which the ramp begins
t_stop - total duration of the waveform
baseline - current value before the ramp
time_step - interval between increments in the ramp current
t_start - time at which the waveform begins (used to construct waveforms
containing multiple ramps).
"""
if onset > t_start:
times = np.hstack((np.array((t_start, onset)), # flat part
np.arange(onset + time_step, t_stop + time_step, time_step))) # ramp part
else:
times = np.arange(t_start, t_stop + time_step, time_step)
amps = baseline + gradient*(times - onset) * (times > onset)
return times, amps
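# For example, ramp(0.000075, 30.0, 300.0) (the Class 1 excitable stimulus below)
# holds the baseline of 0 nA until t = 30 ms and then rises linearly at
# 0.000075 nA/ms, reaching roughly 0.02 nA by t_stop.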
def stepify(times, values):
"""
Generate an explicitly-stepped version of a time series.
"""
new_times = np.empty((2*times.size - 1,))
new_values = np.empty_like(new_times)
new_times[::2] = times
new_times[1::2] = times[1:]
new_values[::2] = values
new_values[1::2] = values[:-1]
return new_times, new_values
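# For example, stepify(np.array([0, 1, 2]), np.array([5, 7, 9])) returns
# times [0, 1, 1, 2, 2] and values [5, 5, 7, 7, 9], i.e. each value is held
# until the next time point, which plots as a step waveform.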
# == Get command-line options, import simulator backend =====================
sim, options = get_simulator()
# == Initialize figure ======================================================
j = 0
plt.ion()
fig = plt.figure(1, facecolor='white', figsize=(6, 6))
gs = gridspec.GridSpec(5, 4)
gs.update(hspace=0.5, wspace=0.4)
# == Sub-plot A: Tonic spiking ==============================================
t_stop = 100.0
run_simulation(a=0.02, b=0.2, c=-65.0, d=6.0, v_init=-70.0,
waveform=step(0.014, t_stop),
t_stop=t_stop, title='(A) Tonic spiking',
label_scalebar=True)
# == Sub-plot B: Phasic spiking =============================================
t_stop = 200.0
run_simulation(a=0.02, b=0.25, c=-65.0, d=6.0, v_init=-64.0,
waveform=step(0.0005, t_stop),
t_stop=t_stop, title='(B) Phasic spiking')
# == Sub-plot C: Tonic bursting =============================================
t_stop = 220.0
run_simulation(a=0.02, b=0.2, c=-50.0, d=2.0, v_init=-70.0,
waveform=step(0.015, t_stop),
t_stop=t_stop, title='(C) Tonic bursting')
# == Sub-plot D: Phasic bursting ============================================
t_stop = 200.0
run_simulation(a=0.02, b=0.25, c=-55.0, d=0.05, v_init=-64.0,
waveform=step(0.0006, t_stop),
t_stop=t_stop, title='(D) Phasic bursting')
# == Sub-plot E: Mixed mode =================================================
t_stop = 160.0
run_simulation(a=0.02, b=0.2, c=-55.0, d=4.0, v_init=-70.0,
waveform=step(0.01, t_stop),
t_stop=t_stop, title='(E) Mixed mode')
# == Sub-plot F: Spike Frequency Adaptation (SFA) ===========================
t_stop = 85.0
run_simulation(a=0.01, b=0.2, c=-65.0, d=8.0, v_init=-70.0,
waveform=step(0.03, t_stop),
t_stop=t_stop, title='(F) SFA')
# == Sub-plot G: Class 1 excitable ==========================================
'''
Note: This simulation is supposed to use a different parameterization of the
model, i.e.
V' = tau*(0.04*V^2 + 4.1*V + 108 -u + I)
as opposed to
V' = tau*(0.04*V^2 + 5*V + 140 - u + I)
The alternative parameterization is not currently available in PyNN, therefore
the results of this simulation are not expected to match the original figure.
'''
t_stop = 300.0
run_simulation(a=0.02, b=0.2, c=-65.0, d=6.0, v_init=-70.0,
waveform=ramp(0.000075, 30.0, t_stop),
t_stop=t_stop, title='(G) Class 1 excitable')
# == Sub-plot H: Class 2 excitable ==========================================
t_stop = 300.0
run_simulation(a=0.2, b=0.26, c=-65.0, d=0.0, v_init=-64.0,
waveform=ramp(0.000015, 30.0, t_stop, baseline=-0.0005),
t_stop=t_stop, title='(H) Class 2 excitable')
# == Sub-plot I: Spike latency ==============================================
t_stop = 100.0
run_simulation(a=0.02, b=0.2, c=-65.0, d=6.0, v_init=-70.0,
waveform=pulse(0.00671, # 0.00704 in original
[10], 3, t_stop),
t_stop=t_stop, title='(I) Spike latency',
scalebar_level=0.5)
# == Sub-plot J: Subthreshold oscillation ===================================
t_stop = 200.0
run_simulation(a=0.05, b=0.26, c=-60.0, d=0.0, v_init=-62.0,
waveform=pulse(0.002, [20], 5, t_stop),
t_stop=t_stop, title='(J) Subthreshold oscillation',
scalebar_level=0.5)
# == Sub-plot K: Resonator ==================================================
t_stop = 400.0
T1 = t_stop / 10
T2 = T1 + 20
T3 = 0.7 * t_stop
T4 = T3 + 40
run_simulation(a=0.1, b=0.26, c=-60.0, d=-1.0, v_init=-62.0,
waveform=pulse(0.00065, [T1, T2, T3, T4], 4, t_stop),
t_stop=t_stop, title='(K) Resonator',
scalebar_level=0.5)
# == Sub-plot L: Integrator =================================================
'''
Note: This simulation is supposed to use a different parameterization of the
model, i.e.
V' = tau*(0.04*V^2 + 4.1*V + 108 -u + I)
as opposed to
V' = tau*(0.04*V^2 + 5*V + 140 - u + I)
The alternative parameterization is not currently available in PyNN, therefore
the results of this simulation are not expected to match the original figure.
'''
t_stop = 100.0
T1 = t_stop / 11
T2 = T1 + 5
T3 = 0.7 * t_stop
T4 = T3 + 10
run_simulation(a=0.02, b=-0.1, c=-55.0, d=6.0, v_init=-60.0,
waveform=pulse(0.009, [T1, T2, T3, T4], 2, t_stop),
t_stop=t_stop, title='(L) Integrator',
scalebar_level=0.5)
# == Sub-plot M: Rebound spike ==============================================
t_stop = 200.0
run_simulation(a=0.03, b=0.25, c=-60.0, d=4.0, v_init=-64.0,
waveform=pulse(-0.015, [20], 5, t_stop),
t_stop=t_stop, title='(M) Rebound spike')
# == Sub-plot N: Rebound burst ==============================================
t_stop = 200.0
run_simulation(a=0.03, b=0.25, c=-52.0, d=0.0, v_init=-64.0,
waveform=pulse(-0.015, [20], 5, t_stop),
t_stop=t_stop, title='(N) Rebound burst')
# == Sub-plot O: Threshold variability ======================================
t_stop = 100.0
times = np.array([0, 10, 15, 70, 75, 80, 85, t_stop])
amps = np.array([0, 0.001, 0, -0.006, 0, 0.001, 0, 0])
run_simulation(a=0.03, b=0.25, c=-60.0, d=4.0, v_init=-64.0,
waveform=(times, amps),
t_stop=t_stop, title='(O) Threshold variability')
# == Sub-plot P: Bistability ================================================
t_stop = 300.0
T1 = t_stop/8
T2 = 208 # 216.0 in original
run_simulation(a=0.1, b=0.26, c=-60.0, d=0.0, v_init=-61.0,
waveform=pulse(0.00124, [T1, T2], 5, t_stop, baseline=0.00024),
t_stop=t_stop, title='(P) Bistability',
scalebar_level=0.5)
# == Sub-plot Q: Depolarizing after-potential ===============================
t_stop = 50.0
run_simulation(a=1.0, b=0.18, # 0.2 in original
c=-60.0, d=-21.0, v_init=-70.0,
waveform=pulse(0.02, [9], 2, t_stop),
t_stop=t_stop, title='(Q) DAP',
scalebar_level=0.5)
# == Sub-plot R: Accomodation ===============================================
'''
Note: This simulation is supposed to use a different parameterization of the
model, i.e.
u' = tau*a*(b*(V + 65))
as opposed to
u' = tau*a*(b*V - u)
The alternative parameterization is not currently available in PyNN, therefore
the results of this simulation are not expected to match the original figure.
'''
t_stop = 400.0
parts = (ramp(0.00004, 0.0, 200.0),
(np.array([200.0 + global_time_step, 300.0 - global_time_step]), np.array([0.0, 0.0])),
ramp(0.00032, 300.0, 312.5, t_start=300.0),
(np.array([312.5 + global_time_step, t_stop]), np.array([0.0, 0.0])))
totalTimes, totalAmps = np.hstack(parts)
run_simulation(a=0.02, b=1.0, c=-55.0, d=4.0, v_init=-65.0, u_init=-16.0,
waveform=(totalTimes, totalAmps),
t_stop=t_stop, title='(R) Accomodation',
scalebar_level=0.5)
# == Sub-plot S: Inhibition-induced spiking =================================
t_stop = 350.0
run_simulation(a=-0.02, b=-1.0, c=-60.0, d=8.0, v_init=-63.8,
waveform=pulse(0.075, [50], 170, # 200 in original
t_stop, baseline=0.08),
t_stop=t_stop, title='(S) Inhibition-induced spiking')
# == Sub-plot T: Inhibition-induced bursting ================================
'''
Modifying parameter d from -2.0 to -0.7 in order to reproduce Fig. 1
'''
t_stop = 350.0
run_simulation(a=-0.026, b=-1.0, c=-45.0, d=-0.7, v_init=-63.8,
waveform=pulse(0.075, [50], 200, t_stop, baseline=0.08),
t_stop=t_stop, title='(T) Inhibition-induced bursting')
# == Export figure in PNG format ============================================
filename = normalized_filename("results", "izhikevich2004", "png", options.simulator)
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
fig.savefig(filename)
| bsd-3-clause |
adamgreenhall/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count 2 times as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/plotting/test_series.py | 7 | 30920 | #!/usr/bin/env python
# coding: utf-8
import nose
import itertools
from datetime import datetime
import pandas as pd
from pandas import Series, DataFrame, date_range
from pandas.compat import range, lrange
import pandas.util.testing as tm
from pandas.util.testing import slow
import numpy as np
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
""" Test cases for Series.plot """
@tm.mplskip
class TestSeriesPlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@slow
def test_plot_figsize_and_title(self):
# figsize and title
ax = self.series.plot(title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
if self.mpl_ge_1_5_0:
key = 'axes.prop_cycle'
else:
key = 'axes.color_cycle'
colors = self.plt.rcParams[key]
Series([1, 2, 3]).plot()
self.assertEqual(colors, self.plt.rcParams[key])
def test_ts_line_lim(self):
ax = self.ts.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
tm.close()
ax = self.ts.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
def test_ts_area_lim(self):
ax = self.ts.plot.area(stacked=False)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
# GH 7471
ax = self.ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
ax = tz_ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
ax = tz_ts.plot.area(stacked=False, secondary_y=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
def test_label(self):
s = Series([1, 2])
ax = s.plot(label='LABEL', legend=True)
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['None'])
self.plt.close()
# get name from index
s.name = 'NAME'
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['NAME'])
self.plt.close()
# override the default
ax = s.plot(legend=True, label='LABEL')
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
        # Add label info, but don't draw
ax = s.plot(legend=False, label='LABEL')
self.assertEqual(ax.get_legend(), None) # Hasn't been drawn
ax.legend() # draw it
self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
exp = np.array([1, 2, 3], dtype=np.float64)
self.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
self.assert_numpy_array_equal(
masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
ax = s.plot(use_index=False)
label = ax.get_xlabel()
self.assertEqual(label, '')
ax2 = s.plot.bar(use_index=False)
label2 = ax2.get_xlabel()
self.assertEqual(label2, '')
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot.bar(log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([200, 500]).plot.barh(log=True)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
if self.mpl_ge_2_0_0:
expected = np.hstack((1.0e-05, expected))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
res = ax.get_ylim()
self.assertAlmostEqual(res[0], ymin)
self.assertAlmostEqual(res[1], ymax)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
res = ax.get_xlim()
self.assertAlmostEqual(res[0], ymin)
self.assertAlmostEqual(res[1], ymax)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
ax = df.plot.bar(use_index=False)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
axes = df.plot()
self._check_ticks_props(axes, xrot=0)
axes = df.plot(rot=30)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_pie_series(self):
        # if the sum of values is less than 1.0, pie handles them as fractions
        # and draws a semicircle.
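        # e.g. Series([0.2, 0.3]).plot.pie() fills only half of the circle,
        # because the values sum to 0.5.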
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
self.assertEqual(ax.get_ylabel(), 'YLABEL')
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with less colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot.pie, labels=labels,
colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot.pie, colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100)
for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
self.assertEqual(t.get_fontsize(), 7)
# includes negative value
with tm.assertRaises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'],
name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
ax = s.plot.pie(legend=True)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
self.assertEqual(result, expected)
@slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 10)
@slow
def test_hist_df_with_nonnumerics(self):
# GH 9853
with tm.RNGContext(1):
df = DataFrame(
np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
df['E'] = ['x', 'y'] * 5
ax = df.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 20)
ax = df.plot.hist() # bins=10
self.assertEqual(len(ax.patches), 40)
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist,
by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
self.assertEqual(len(ax.patches), 2)
@slow
def test_hist_layout(self):
df = self.hist_df
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist,
by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2),
figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
# primary -> secondary
ax = df['a'].plot.hist(legend=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible, right axis must be visible
self._check_legend_labels(ax.left_ax,
labels=['a (right)', 'b (right)'])
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> primary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
# right axes is returned
df['b'].plot.hist(ax=ax, legend=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
self.assertTrue(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
@slow
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
s = Series(np.random.randn(30), name='x')
# primary -> secondary (without passing ax)
ax = df.plot()
s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# primary -> secondary (with passing ax)
ax = df.plot()
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
        # secondary -> secondary (without passing ax)
ax = df.plot(secondary_y=True)
s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, labels=expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True, mark_right=False)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a', 'b', 'c', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_kde(self):
ax = self.ts.plot.hist(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot.kde)
_check_plot_works(self.ts.plot.density)
ax = self.ts.plot.kde(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from numpy import linspace
_check_plot_works(self.ts.plot.kde, bw_method=.5,
ind=linspace(-100, 100, 20))
_check_plot_works(self.ts.plot.density, bw_method=.5,
ind=linspace(-100, 100, 20))
ax = self.ts.plot.kde(logy=True, bw_method=.5,
ind=linspace(-100, 100, 20))
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
axes = _check_plot_works(s.plot.kde)
# check if the values have any missing values
# GH14821
self.assertTrue(any(~np.isnan(axes.lines[0].get_xdata())),
msg='Missing Values not dropped')
@slow
def test_hist_kwargs(self):
ax = self.ts.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 5)
self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
if self.mpl_ge_1_3_1:
ax = self.ts.plot.hist(orientation='horizontal')
self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
tm.close()
ax = self.ts.plot.hist(align='left', stacked=True)
tm.close()
@slow
def test_hist_kde_color(self):
ax = self.ts.plot.hist(logy=True, bins=10, color='b')
self._check_ax_scales(ax, yaxis='log')
self.assertEqual(len(ax.patches), 10)
self._check_colors(ax.patches, facecolors=['b'] * 10)
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ax = self.ts.plot.kde(logy=True, color='r')
self._check_ax_scales(ax, yaxis='log')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self._check_colors(lines, ['r'])
@slow
def test_boxplot_series(self):
ax = self.ts.plot.box(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kind_both_ways(self):
s = Series(range(3))
for kind in plotting._common_kinds + plotting._series_kinds:
if not _ok_for_gaussian_kde(kind):
continue
s.plot(kind=kind)
getattr(s.plot, kind)()
@slow
def test_invalid_plot_data(self):
s = Series(list('abcd'))
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with tm.assertRaises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz'] * 10
# in mpl 1.5+ this is a TypeError
with tm.assertRaises((ValueError, TypeError)):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
@slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(Series([1, 2, 3]),
plotting._series_kinds +
plotting._common_kinds)
@slow
def test_standard_colors(self):
for c in ['r', 'red', 'green', '#FF0000']:
result = plotting._get_standard_colors(1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(3, color=[c])
self.assertEqual(result, [c] * 3)
@slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = plotting._get_standard_colors(num_colors=1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(num_colors=3, color=[c])
self.assertEqual(result, [c] * 3)
# single letter colors like k
for c in colors.ColorConverter.colors:
result = plotting._get_standard_colors(num_colors=1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(num_colors=3, color=[c])
self.assertEqual(result, [c] * 3)
def test_series_plot_color_kwargs(self):
# GH1890
ax = Series(np.arange(12) + 1).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
# #1890
ax = Series(np.arange(12) + 1, index=date_range(
'1/1/2000', periods=12)).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
if self.mpl_ge_1_5_0:
def_colors = self._maybe_unpack_cycler(mpl.rcParams)
else:
def_colors = mpl.rcParams['axes.color_cycle']
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
for i in range(ncolors):
ax = s.plot()
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
ax = s.plot(xticks=[0, 3, 5, 9])
exp = ['P%02d' % i for i in [0, 3, 5, 9]]
self._check_text_labels(ax.get_xticklabels(), exp)
def test_custom_business_day_freq(self):
# GH7222
from pandas.tseries.offsets import CustomBusinessDay
s = Series(range(100, 121), index=pd.bdate_range(
start='2014-05-01', end='2014-06-01',
freq=CustomBusinessDay(holidays=['2014-05-26'])))
_check_plot_works(s.plot)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
topotech/AID_tarea1 | titanic/main.py | 1 | 9651 | # *-* coding: utf-8 *-*
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Part a)
#pd.options.display.mpl_style = 'default'
plt.style.use('ggplot')
data = pd.read_csv('data/titanic-train.csv', sep=';')
# Part b)
print "\nForma matriz:" + str(data.shape)
print "--------------------\n"
print "Resumen de los datos:\n" + str(data.describe())
print "--------------------\n"
print "Nombres, tipos y cantidad de datos en cada variable:"
print data.info()
print " =============================================================== \n =============================================================== \n"
# Part c)
print "Cola de la data:"
print "--------------------\n"
print data.tail()
print "--------------------\n"
print "Cabeza de la data:"
print "--------------------\n"
print data.head()
print "--------------------\n"
print "Datos 200-210:"
print "--------------------\n"
print data[200:210][:]
print "--------------------\n"
print "Cola de la data filtrada:"
print "--------------------\n"
print data[['Sex','Survived']].tail()
print "--------------------\n"
print "Cabeza de la data filtrada:"
print "--------------------\n"
print data[['Sex','Survived']].head()
print "--------------------\n"
print "Datos 200-210 filtrados:"
print "--------------------\n"
print data[['Sex','Survived']][200:210]
print "--------------------\n"
print " =============================================================== \n =============================================================== \n"
# Part d)
print "Número de personas por sexo:"
print data['Sex'].value_counts()
print''
print "Número de sobrevivientes por sexo"
print data[data['Survived']==1].groupby('Sex').Survived.count()
print''
print "% personas sobrevivientes por sexo (usando la media)"
print data.groupby('Sex').Survived.mean()
print''
print "Data (Survived x Sex)"
print data.groupby('Survived')['Sex'].value_counts()
print''
print "Data (Sex x Survived)"
print data.groupby('Sex')['Survived'].value_counts()
print''
print "Renderizando plot..."
data.groupby('Sex')['Survived'].value_counts().unstack().plot(kind='bar')
plt.show()
print "Data porcentual (Survived x Sex)"
grouped_props = data.groupby('Survived')['Sex'].value_counts()/\
data.groupby('Survived').size()
print grouped_props
print "Renderizando plot..."
grouped_props.unstack().plot(kind='bar')
plt.show()
print " =============================================================== \n =============================================================== \n"
# Part e)
print "Edad promedio de sobrevivientes y muertos"
print data.groupby('Survived')['Age'].mean()
print ""
print "Cargando boxplots e histograma edad vs supervivencia..."
data.boxplot(column='Age',by='Survived')
data.hist(column='Age',by='Survived')
print "Renderizando plots..."
plt.show()
print "-----\nCantidad de muertos agrupados por conocimiento de edad:"
print "Desconocida:\t" + str(sum(data[data.Survived==0]['Age'].isnull()))
print "Conocida:\t\t" + str(sum(data[data.Survived==0]['Age'].notnull()))
print "\nDatos de persona más vieja:"
print data[data.Age==data['Age'].max()]
print " =============================================================== \n =============================================================== \n"
# Part f)
# Answer to the final question: an expression of the form 'data[CONDITION][LABEL]' creates a copy of the DataFrame's data.
# That is why a statement of the form DataFrame.loc[row_indexer, col_indexer] is recommended to modify the data by reference.
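# For example, a chained assignment such as
#   data[data.Age.isnull()]['Age'] = M_average_age    # acts on a temporary copy
# may leave `data` unchanged, whereas the .loc form used below,
#   data.loc[data.Age.isnull(), 'Age'] = M_average_age
# modifies `data` itself.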
print "Se establece la media aritmética como el valor de reemplazo para las edades nulas. Ya que es una edad estimada " \
"se utiliza la notación xx.5:\n"
M_average_age = float(int(data[data.Sex == 'male']['Age'].mean())) + .5
F_average_age = float(int(data[data.Sex == 'female']['Age'].mean())) + .5
data.loc[(data.Age.isnull()) & (data.Sex =='male'), 'Age'] = M_average_age
data.loc[(data.Age.isnull()) & (data.Sex =='female'), 'Age'] = F_average_age
print "Nueva edad masculina: " + str(M_average_age)
print "Nueva edad femenina: " + str(F_average_age)
print " =============================================================== \n =============================================================== \n"
# Part g)
# By definition we know there are 1st, 2nd and 3rd classes. Let's confirm:
print "Número de clases: " + str(data['Pclass'].unique().size) + "\n"
print "De los sobrevivientes: ¿cuántas partes constitutía cada clase?:\n"
print data.groupby(['Survived', 'Pclass']).size()/data.groupby(['Survived']).size()
print ""
print "¿Cuántos sobrevivieron de cada clase porcentualmente?:\n"
print data.groupby(['Pclass','Survived']).size()/data.groupby(['Pclass']).size()
females = data[data.Sex == 'female'].groupby(['Survived','Pclass']).size()/\
data[data.Sex == 'female'].groupby(['Survived']).size()
males = data[data.Sex == 'male'].groupby(['Survived','Pclass']).size()/\
data[data.Sex == 'male'].groupby(['Survived']).size()
print "¿Cuál es la proporción de sobreviviente por clase entre los hombres?:\n"
print males
print "¿Cuál es la proporción de sobreviviente por clase entre las mujeres?:\n"
print females
males.unstack().plot(kind='bar')
females.unstack().plot(kind='bar')
plt.show()
print " =============================================================== \n =============================================================== \n"
# Part h)
print "Predicción con Regla 1 (sobreviven todos los de 1º clase y las mujeres de 2º):"
data['prediction'] = 0
# Survival is predicted for women and first-class passengers. If female and third class, it is corrected to 0.
data.loc[(data.Sex == 'female') | (data.Pclass == 1), 'prediction'] = 1
print "Precisión:\n" + \
str(data[data.prediction == 1][data.Survived == 1].size/float(data[data.prediction == 1].size))
print "Recall\n" + \
str(data[data.prediction == 1][data.Survived == 1].size/float(data[data.Survived == 1].size))
data.to_csv('predicciones-titanic.csv')
print " =============================================================== \n =============================================================== \n"
# Part i)
print "Nuevo análisis particionando en 5 clases según costo de pasaje:"
_, myBP = data.boxplot(column='Fare', return_type='both')
whiskers = [whiskers.get_ydata() for whiskers in myBP["whiskers"]]
print "Rango de datos relevantes: ["+str(whiskers[0][1])+","+str(whiskers[1][1])+"]" # Imprime [bigote_inf,bigote_sup]
# From the printout above we conclude that the outliers are above 65.0
dataFareTyp = data[ data.Fare <= whiskers[1][1] ].copy()
dataFareTyp.hist(column='Fare')
plt.show()
# Data analysis
dataFareTypSurv = data[(data.Fare <= whiskers[1][1]) & (data.Survived == 1)]
dataFareTypDied = data[(data.Fare <= whiskers[1][1]) & (data.Survived == 0)]
fig, ax = plt.subplots()
sns.distplot(dataFareTypSurv['Fare'], bins=[0, 10, 20, 30, 40, 65] ) # Short curve
sns.distplot(dataFareTypDied['Fare'], bins=[0, 10, 20, 30, 40, 65] ) # Tall curve
# Show the new histogram with the new "classes"
plt.show()
# dataFareTyp is used to define the new economic classification criterion
dataFareTyp.loc[(dataFareTyp.Fare >= 0) & (dataFareTyp.Fare < 10), 'Pclass'] = 5
dataFareTyp.loc[(dataFareTyp.Fare >= 10) & (dataFareTyp.Fare < 20), 'Pclass'] = 4
dataFareTyp.loc[(dataFareTyp.Fare >= 20) & (dataFareTyp.Fare < 30), 'Pclass'] = 3
dataFareTyp.loc[(dataFareTyp.Fare >= 30) & (dataFareTyp.Fare < 40), 'Pclass'] = 2
dataFareTyp.loc[(dataFareTyp.Fare >= 40) , 'Pclass'] = 1
# dataFareTyp is used to define the new classification criterion
# After inspecting the histogram, the deceased are defined as everyone in 5th class...
print "Predicción con Regla 2 (Se salvan todos menos 5º clase):"
dataFareTyp['prediction'] = 1
data.loc[(data.Pclass == 5) , 'prediction'] = 0
print "Precisión:\n" + \
str(dataFareTyp[dataFareTyp.prediction == 1][dataFareTyp.Survived == 1].size/float(dataFareTyp[dataFareTyp.prediction == 1].size))
print "Recall\n" + \
str(dataFareTyp[dataFareTyp.prediction == 1][dataFareTyp.Survived == 1].size/float(dataFareTyp[dataFareTyp.Survived == 1].size))
print " =============================================================== \n =============================================================== \n"
#Part j)
print "The two prediction rules are now tested on the test data:\n"
data = pd.read_csv('data/titanic-test.csv', sep=',')
survival = pd.read_csv('data/titanic-gendermodel.csv', sep=',')
data = data.merge(survival,on='PassengerId').copy()
regla1 = data.copy()
regla2 = data.copy()
regla1['prediction'] = 0
regla1.loc[(regla1.Sex == 'female') | (regla1.Pclass == 1), 'prediction'] = 1
regla2['prediction'] = 1
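# On the test data Pclass is never redefined, so Rule 2's "5th class" is expressed
# directly through its defining condition from part i): Fare < 10.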
regla2.loc[regla2.Fare < 10 , 'prediction'] = 0
print "\nRegla 1\n-------"
print "\tPrecisión:\n\t" + \
str(regla1[regla1.prediction == 1][regla1.Survived == 1].size/float(regla1[regla1.prediction == 1].size))
print "\n\tRecall:\n\t" + \
str(regla1[regla1.prediction == 1][regla1.Survived == 1].size/float(regla1[regla1.Survived == 1].size)) + "\n"
print "\nRegla 2\n-------"
print "\tPrecisión:\n\t" + \
str(regla2[regla2.prediction == 1][regla2.Survived == 1].size/float(regla2[regla2.prediction == 1].size))
print "\n\tRecall:\n\t" + \
str(regla2[regla2.prediction == 1][regla2.Survived == 1].size/float(regla2[regla2.Survived == 1].size))
regla1['prediction'] = 0
regla1.loc[(regla1.Sex == 'female') | (regla1.Pclass == 1), 'prediction'] = 1
regla2['prediction'] = 1
regla2.loc[(regla2.Pclass == 5) , 'prediction'] = 0 | unlicense |
bartongroup/RATS | extras/F1000_manuscript/Analysis_scripts/fileutilities.py | 1 | 69314 | #!/homes/kfroussios/bin/python3
"""fileutilities.py
Author: Kimon Froussios
Compatibility tested: python 3.5.2
Last reviewed: 13/01/2017
This module is a solution for Frequently Performed Generic Tasks that involve
multiple files:
* repeating a command for a range of files (not currently parallelized),
* accessing and restructuring (multiple) delimited files.
* miscellaneous stuff. Some of it is auxiliary to the primary functions, some
is a legacy of this module's evolution of concept. Some of it could arguably
be in different libraries, but I see no reason to inflate the dependencies.
I hate sprawling webs of dependencies on non-standard packages more than I
dislike some out-of-place functions.
The primary purpose of this entire module is to encapsulate frequent generic
tasks and related boilerplate code so as to be able to do them from the command
line in a single step, instead of implementing specialized code for each
variation of the task. The module provides a library of flexible functions as
well as a main() implementing the primary use scenarios.
Execute with -h in a shell to obtain syntax and help.
"""
# This module consists of:
# - a class for handling lists of files,
# - a library of functions that perform generic tasks on multiple files, and
# - a main that provides access to most of the above functionality
# NOTE about DataFrame indexes and headers:
# Although dataframes support row and column labels, these make content manipulations
# in the context of this module harder. Instead, any labels present in the text
# input are treated as plain rows or columns. These may be optionally dropped or
# preserved, but the dataframe in-built labels for columns and rows are reserved
# solely for custom use. When appropriate these custom labels will be included in
# the output.
####### TODO SUGGESTIONS #######
#
#
################################
######## UPDATES ###############
#
# 2016-04-13 : Range support added to --cols. Closed-end ranges only.
# 2016-04-13 : Get columns by name support added to --cols.
# 2016-04-13 : Multiple ranges support added for --loop R. NEW SYNTAX!
# 2016-04-13 : Comma separated value support added to TARGET and --cols ONLY! Old syntax also retained.
# Comma lists are not practical for other flags as they accept regex strings.
# 2016-04-13 : Added option to retain a fixed number of metadata lines at the top of the files for
# --cols, --rndcols and --appnd.
#
################################
import os, sys, string, re, subprocess, random, argparse
import pandas as pd
from builtins import list
from collections import Counter
import mylogs as ml
##### F U N C T I O N S #####
# http://stackoverflow.com/questions/4836710/does-python-have-a-built-in-function-for-string-natural-sort
def natural_sorted(l):
"""Sort list of numbers/strings in human-friendly order.
Args:
l(list): A list of strings.
Returns:
list
"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
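# Example (illustrative only; the file names are hypothetical):
#   >>> natural_sorted(["run10.txt", "run2.txt", "run1.txt"])
#   ['run1.txt', 'run2.txt', 'run10.txt']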
def expand_fpaths(flist):
"""Fully expand and absolute-ify the paths of listed files.
Does not verify path validity. All paths are expanded.
Args:
flist[str]: A list/FilesList of files.
Returns:
[str]: List of expanded paths.
"""
return [os.path.abspath(os.path.expanduser(str(f))) for f in flist]
def are_empty(flist, invalids=True):
"""List the files that are empty.
Args:
flist: A list/FilesList of files to probe.
invalids(bool): How should invalid paths be reported? (Default True)
There is no right or wrong way to classify invalid
paths, it is entirely dependent on the context in which
the files would be used downstream.
Returns:
FilesList: List of empty files.
"""
try:
flist.aliases[0]
except AttributeError:
# If it's a plain list, upgrade it to a FilesList.
flist = FilesList(flist)
result = FilesList()
for i, (myfile, myalias) in flist.enum():
try:
if os.path.getsize(myfile) == 0:
result.append(myfile, myalias)
except OSError:
if invalids:
result.append(myfile, myalias)
return result
def dont_exist(flist):
"""List the files that don't exist.
Args:
flist: A list/FilesList of files to probe.
Returns:
FilesList: List of missing files.
"""
try:
flist.aliases[0]
except AttributeError:
# If it's a plain list, upgrade it to a FilesList.
flist = FilesList(flist)
result = FilesList()
for i, (myfile, myalias) in flist.enum():
if not os.path.exists(myfile):
result.append(myfile, myalias)
return result
def arent_readable(flist):
"""List inaccessible files.
Invalid paths are also reported as inaccessible.
Args:
flist: A list/FilesList of files to probe.
Returns:
FilesList: List of inaccessible files.
"""
try:
flist.aliases[0]
except AttributeError:
# If it's a plain list, upgrade it to a FilesList.
flist = FilesList(flist)
result = FilesList()
for f, (myfile, myalias) in flist.enum():
try:
if not os.access(myfile, os.R_OK):
result.append(myfile, myalias)
except OSError:
result.append(myfile, myalias)
return result
def arent_text(flist, invalids=True):
"""List the file that are (probably) text.
This is achieved by probing the contents and using heuristics to
determine the type of those contents.
Args:
flist: A list/FilesList of files to probe.
invalids(bool): Should invalid paths be reported as not-text?
(Default True)
Returns:
FilesList: List of files that are probably not plain text.
"""
try:
flist.aliases[0]
except AttributeError:
# If it's a plain list, upgrade it to a FilesList.
flist = FilesList(flist)
result = FilesList()
for i, (myfile, myalias) in flist.enum():
try:
with open(myfile) as f:
if not istext(f.read(1024)):
result.append(myfile, myalias)
except IOError:
if invalids:
result.append(myfile, myalias)
return result
# Helper function, string check.
def istext(s):
"""Check if a string is (probably) text.
Use heuristic based on characters contained in the string, adapted from:
http://code.activestate.com/recipes/173220-test-if-a-file-or-string-is-text-or-binary/
Args:
s(str): A string to test.
Returns:
bool
"""
# Heuristic: treat printable ASCII plus common whitespace as "text" characters.
text_characters = "".join(list(map(chr, list(range(32, 127)))) + list("\n\r\t\b"))
# Python 3 translation table that deletes all "text" characters.
_non_text_trans = str.maketrans("", "", text_characters)
if "\0" in s:
return False
if not s: # Empty files/strings are considered text
return True
# Remove all the "text" characters; whatever remains is non-text.
t = s.translate(_non_text_trans)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/float(len(s)) > 0.30:
return False
return True
def slink(flist, aliases=None, dir="./", autoext=True):
"""Create symbolic links for multiple files.
Create a link for each of the listed paths into the specified directory,
using the specified aliases. Items in the lists will be matched one for
one.
If the aliases argument is omitted, the names for the links will be drawn
from the aliases attribute of the paths list, if it is a FilesList object.
If no aliases exist in either form, the files will be linked in the current
or specified directory, using their current basenames as the link names.
If linking to files of the same name located in different directories, a
number will be automatically suffixed to the basename.
Args:
flist[str]: A list/FilesList of paths to link to.
aliases[str]: A list of respective names for the created links. If
omitted, the alias attribute of the flist argument will be
used, and failing that, the existing basenames will be used.
dir(str): The path to the directory in which the links should be
placed. (Default "./")
autoext(bool): Add the file extensions to the created links, if the
links are created from aliases that lack them.
(Default True)
"""
if not aliases:
# No link names provided. Try to find them elsewhere or create them.
try:
# flist is a FilesList and has the aliases attribute.
aliases = flist.aliases
except AttributeError:
# flist is a plain list, so compute the link name from the file name.
aliases = [os.path.basename(p) for p in flist]
# Check for duplicate aliases and amend them.
# This applies mainly to link names automatically created from filenames, as
# the same file name can exist in different directories.
if len(set(aliases)) < len(flist):
aliases = autonumerate(aliases)
# Add extensions where necessary, if desired.
if autoext:
for i in range(0, len(flist)):
(b, p) = os.path.splitext(flist[i])
c = p
# If it's a .gz, include the next nested extension as well.
if p == ".gz":
p = os.path.splitext(b)[1] + p
# Don't duplicate the extension if the alias already has it.
a = os.path.splitext(aliases[i])[1]
if c != a:
aliases[i] = aliases[i] + p
# Link.
for i, mypath in enumerate(flist):
os.symlink(mypath, os.path.join(dir, aliases[i]))
# Helper function.
def autonumerate(things):
"""Detect duplicate entries in a string list and suffix them.
Suffixes are in _N format where N a natural number >=2. Existing suffixes
in that format will also be detected and incremented.
Args:
things[str]: A list of strings.
Returns:
[str]: A corrected list of strings.
"""
c = Counter(things)
# Because I use decrement, reversing the list ensures first instance gets smallest number.
things.reverse()
for i, t in enumerate(things):
n = c[t]
if n > 1: # The first occurrence is not suffixed.
newname = t +'_' + str(n)
while newname in things: # Check for already present suffixes
n += 1
newname = t +'_' + str(n)
things[i] = newname
c[t] -= 1
things.reverse()
return things
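# Example (illustrative only): duplicate entries receive numeric suffixes while
# the first occurrence keeps its original name.
#   >>> autonumerate(["a", "b", "a", "a"])
#   ['a', 'b', 'a_2', 'a_3']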
def make_names(items, parameters):
"""Automatically create file names based on parameters.
If automatic names happen to turn out identical with one another, unique
numbers are appended to differentiate them. Check documentation for
autonumerate().
Args:
items[str]: A list of strings/filenames/paths to use as the basis for
the output names.
parameters(str,str,str): The first element is the output directory,
the second is a common prefix to add to the names,
the third is a common suffix to add to the names.
Like so: <out[0]>/<out[1]>item<out[2]> .
If any of the 3 values is None, no outnames will be made.
Use current directory and empty strings as necessary.
Returns:
[str]: A list of file paths.
"""
outfiles = []
if None not in parameters:
for i in items:
outfiles.append(os.path.join(os.path.abspath(os.path.expanduser(parameters[0])),
parameters[1] + os.path.splitext(os.path.basename(str(i)))[0] + parameters[2]) )
autonumerate(outfiles)
return outfiles
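# Illustrative sketch (paths are hypothetical): one output name per input item,
# assembled as <dir>/<prefix><basename-without-extension><suffix>.
#   >>> make_names(["/data/s1.txt", "/data/s2.txt"], ("./out", "flt_", ".tsv"))
#   ['<abspath of ./out>/flt_s1.tsv', '<abspath of ./out>/flt_s2.tsv']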
def do_foreach(flist, comm, comments=False, progress=True, out=(None,None,None), log=False):
"""Execute an arbitrary command for each of the listed files.
Enables executing a shell command over a range of items, by inserting the
item values into the command as directed by place-holder substrings.
Although the above is how it is meant to be used, the values in the
FilesList could be overridden to be any arbitrary string, in which case
only two of the placeholders will work correctly (full and alias). The
others are computed from these and may not be sensible when the items are
not file paths.
This is the only function with comments or progress attributes, because
invoked commands print their own output directly, so any informative messages
controlled by this library need to also be inserted in real time.
Args:
flist[]: A FilesList. If a plain list is given, aliases will be
automatically computed.
comm[str]: The components of an arbitrary command, with place-holders
for the item of each iteration. Placeholder strings are:
'***full***' : absolute path of file.
'***path***' : absolute path to the file.
'***file***' : filename without path.
'***core***' : filename without the last extension.
'***alias***': alias for file, by default same as core.
comments(bool): Print commented call details to STDOUT. (Default False)
progress(bool): Show start and completion of iterations on STDERR.
(Default True)
out(str,str,str): The first element is the output directory, the second
is a common prefix to add to the names, the third is a
common suffix to add to the names. Check documentation for
make_names().
log(bool): Log each individual call to ./commands.log.
"""
outstream= sys.stdout
# Test if it is a FilesList or plain list. Upgrade it if it's plain.
try:
flist.aliases[0]
except AttributeError:
flist = FilesList(flist)
# Create output files. [] if out contains None.
outfiles = make_names(flist, out)
for i, (myfile, myalias) in flist.enum():
# Substitute place-holders.
command = []
for c in comm:
(mypath, mybase) = os.path.split(str(myfile))
c = c.replace("***full***", str(myfile))
c = c.replace("***path***", mypath)
c = c.replace("***file***", mybase)
c = c.replace("***core***", os.path.splitext(mybase)[0])
c = c.replace("***alias***", str(myalias))
command.append(c)
# Redirect output.
if outfiles:
outstream = open(outfiles[i], 'w')
# Do.
try:
see = " ".join(command)
if log:
ml.log_message(message=see, logfile="./commands.log")
if comments and out == (None,None,None):
outstream.write(ml.infostring("CWD: "+ os.getcwd() +"\tDO: "+ see))
if progress:
sys.stderr.write(ml.infostring("DO: "+ see))
except IOError:
pass
subprocess.call(command, stdout=outstream, shell=False)
# Optionally identify iteration.
try:
if comments and out == (None,None,None):
outstream.write(ml.infostring("Finished: "+ str(myalias) +"\n"))
if progress:
sys.stderr.write(ml.infostring("Finished: "+ str(myalias) +"\n"))
except IOError:
pass
finally:
if outfiles:
outstream.close()
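# Illustrative call sketch (the command and files are hypothetical): count lines
# in each listed file, substituting ***full*** with each absolute path in turn.
#   flist = FilesList(["/data/sample1.txt", "/data/sample2.txt"])
#   do_foreach(flist, ["wc", "-l", "***full***"], progress=False)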
def tails(flist, linesToGet=5):
"""Get last lines of text files.
Adapted from:
https://gist.github.com/volker48/3437288
Files that cause an error are still represented in the return value with an
empty sublist, like empty files, in order to maintain the correspondence
between input and output lists.
Args:
flist: A list/FilesList of text files.
linesToGet: Number of lines to get from the end.
Returns:
[[str]]: A list of lists. Each sublist represents the lines returned by
one file.
Raises:
IOError: With a message about ALL the inaccessible files.
"""
result = []
errors = []
for myfile in flist:
if linesToGet < 1:
result.append([])
continue
try:
with open(myfile, 'r') as f:
# Place the cursor at the last character.
f.seek(-1, os.SEEK_END)
position = f.tell()
linesSeen = 0
# I want the beginning of a line, by proxy of the previous line's newline end.
# So if the last character of the file is a newline, ignore it and go one step back.
if f.read(1) == "\n":
position -= 1
f.seek(position)
# Crawl backwards one character at a time.
while linesSeen < linesToGet and position > 0:
# Try previous character.
position -= 1
f.seek(position)
c = f.read(1)
if c == "\n":
linesSeen += 1
if position == 0:
# The cursor will be right after a newline, unless I hit the beginning
# of the file, in which case the loop leaves it on the second character.
# So bring it manually to the start of the line and file.
f.seek(0)
# Now that the cursor is a the right place, read in the lines.
chunk = []
for line in f:
chunk.append(line)
result.append(chunk)
except IOError as e:
result.append([])
errors.append(str(e))
if errors != []:
raise IOError(" --- ".join(errors))
return result
def heads(flist, linesToGet=5):
"""Get first lines of text files.
Files that cause an error are still represented in the return value with an
empty sublist, like empty files, in order to maintain the correspondence
between input and output lists.
Args:
flist: A list/FilesList of text files.
linesToGet: Number of lines to get from the top.
Returns:
[[str]]: A list of lists. Each sublist represents the lines returned by
one file.
Raises:
IOError: With a message about ALL the inaccessible files.
"""
result = []
errors = []
for myfile in flist:
if linesToGet < 1:
result.append([])
continue
try:
with open(myfile, 'r') as f:
chunk = []
for i in range(0, linesToGet):
line = f.readline()
if line != "":
# Otherwise it appends empty strings when it hits the end of file.
chunk.append(line)
result.append(chunk)
except IOError as e:
result.append([])
errors.append(str(e))
if errors != []:
raise IOError(" --- ".join(errors))
return result
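# Example (illustrative only; the file and its contents are hypothetical):
#   >>> heads(["notes.txt"], linesToGet=2)
#   [['first line\n', 'second line\n']]
# tails() behaves symmetrically, returning the last lines instead.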
def swap_strFiles(flist, insep=[","], outsep="\t"):
"""Replace the column separator with a different one.
Supports multiple different delimiters in the input, to support one-step
uniformity when the input files have different delimiters, but ALL input
will be split at ALL/ANY occurring delimiters. If the delimiter of one
file is present in a different role in another file, the output may not
be what you want.
Although made for converting delimited text, inseps and outsep could be any
substring in a text, delimited or not.
Args:
flist: A list/FilesList of delimited text files.
insep[str]: A list of regex strings. (Default [","])
outsep(str): New column separator. (Default "\t")
Returns:
[str]: A list of strings with the changed delimiters. One string per
file. It is up to you to decide what to do with the new
strings. The order of strings is the same as the input.
"""
input = []
if flist == []:
# Read all data from STDIN at once. Input[] gets a single entry.
input.append(sys.stdin.read())
else:
# Read in all the files entirely. Input[] gets as many entries as there are files.
for myfile in flist:
with open(myfile) as f:
input.append(f.read())
return swap_substr(input, insep, outsep)
# Helper function
def swap_substr(slist, insep=[","], outsep="\t"):
"""Replace all occurrences of insep with outsep.
Insep may be a regex.
Args:
slist[str]: A list of strings.
insep[str]: A list of regex strings. (Default [","])
outsep(str): New substring. (Default "\t")
Returns:
[str]: A list of the edited strings. The order of the strings is the
same as the input.
"""
rx = re.compile("|".join(insep), re.MULTILINE)
result = []
for s in slist:
# Replace all delimiters with the new one.
result.append(rx.sub(outsep, s))
return result
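# Example (illustrative only): both delimiters are replaced in one pass.
#   >>> swap_substr(["a,b;c"], insep=[",", ";"], outsep="\t")
#   ['a\tb\tc']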
def prepare_df(df, myalias="", keyCol=None, keyhead="row_ID", header=False, cols=None, appendNum=True):
"""Prepare row names and column names.
Assign column as row labels, rename columns based on their position and an
arbitrary alias name for the dataframe, drop the first row.
Args:
df(pandas.DataFrame): A dataframe.
myalias(str): The basename for the relabelling.
header(bool): Remove first row (Default False).
keyCol(int): Column to be used as row index. If None, no index will be
used. (Default None)
keyhead(str): Label for the index.
cols[int]: Custom index numbers for the columns (Default None). If None
then their current index positions are used.
appendNum(bool): Append the columns' positional indices to the alias
when making the new names (True).
Returns:
pandas.DataFrame
"""
# Set row labels.
if keyhead is None:
keyhead = "row_ID"
if keyCol is not None:
# Add index without dropping it, so as not to affect column positions.
df.set_index(df.columns.values.tolist()[keyCol], inplace=True, drop=False)
df.index.name = str(keyhead)
# Make custom column labels, based on alias and column position.
if not cols:
cols = list(range(0, df.shape[1]))
labels = []
if appendNum:
labels = [str(myalias) +"_|"+ str(i) for i in cols]
else:
labels = [str(myalias) for i in cols]
df.columns = labels
# Remove header.
if header:
df.drop(df.index.values.tolist()[0], axis=0, inplace=True)
return df
def count_columns(flist=[None], colSep=["\t"]):
"""Determine the number of fields in each file by inspecting the first row.
Args:
flist: A list/FilesList of files.
colSep[str]: A list of characters used to separate columns.
Returns:
[int]: A list, in the same order as the given files.
"""
tokenizer = re.compile("|".join(colSep))
counts = []
for file in flist:
f = None
if file is None:
f = sys.stdin
file = "<STDIN>"
else:
f = open(file)
while True:
line = f.readline()
# Skip comments.
if line[0] != "#":
counts.append(len( tokenizer.split(line.rstrip()) ))
break
if f != sys.stdin:
f.close()
return counts
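# Example (illustrative only; the file is hypothetical): one count per file,
# taken from its first non-comment line.
#   >>> count_columns(["table.tsv"], colSep=["\t"])
#   [5]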
def get_valuesSet(flist=[None], axis='r', index=0, filter='a', colSep=["\t"]):
""""List the set of different values in the column(s)/row(s).
Args:
flist: A list/FilesList of files.
colSep[str]: A list of characters used to separate columns.
index: Position index of the required column/row.
axis(str): Data slice orientation - 'r' for row, 'c' for column.
filter(str): non redundant set of: 'a' - all, 'u' - unique, 'r' -
repeated values.
Returns:
[[]]: A list of lists. The inner lists represent the sets in order as
requested.
Raises:
ValueError: Invalid axis or filter values.
"""
tokenizer = "|".join(colSep)
result = []
if flist == []:
# Use None as a flag to read from STDIN
flist.append(None)
# Test if it is a FilesList or plain list. Upgrade it if it's plain.
# It will have at least one entry for sure by now, either way.
try:
flist.aliases[0]
except AttributeError:
flist = FilesList(flist)
# Main part of this function.
results = []
for f, (myfile, myalias) in flist.enum():
# Input.
df = None
instream = sys.stdin
if myfile is not None:
instream = open(myfile)
df = pd.read_csv(instream, sep=tokenizer, header=None, index_col=None, comment="#", engine='python')
if instream != sys.stdin:
instream.close()
# Get value set.
values = None
if axis == 'r':
values = df.iloc[int(index),:].tolist()
elif axis == 'c':
values = df.iloc[:,int(index)].tolist()
else:
raise ValueError("".join(["Unrecognized option: axis=", axis]))
# Get count per value
c = Counter(values)
# Filter.
if filter == 'a':
results.append( set(values) )
elif filter == 'u':
results.append( set([v for v in values if c[v] == 1]) ) # set() is redundant but keeps output type consistent
elif filter == 'r':
results.append( set([v for v in values if c[v] > 1]) )
else:
raise ValueError("".join(["Unrecognized option: filter=", filter]))
return results
def get_columns(flist=[None], cols=[0], colSep=["\t"], header=False, index=None, merge=True):
"""Obtain the specified columns.
Comment lines starting with '#' are ignored.
The data columns are assembled into a single DataFrame.
The returned columns will be labeled based on the name of the file they
came from and their position in it. Existing labels are optionally
preserved as the top row or can be skipped entirely.
If an index is specified, it will be used only for merging, and will NOT be
included in the output columns, unless explicitly present in cols[].
Args:
flist: A list/FilesList of delimited plain text files.
header(bool): Crop the header line (first non-comment line). (Default False)
cols[int/str] : A list of positional indexes or names or ranges of the
desired columns. (Default [0]).
colSep[str]: List of characters used as field separators.
(Default ["\t"]).
merge(bool): Concatenate results from all files into a single
dataframe. If False, a list of dataframes is returned
instead. (Default True).
index(int): Column to be used as row index for merging. (Default None)
Returns:
[pandas.DataFrame]: List of DataFrames. If merge=True, only the
first element will be populated.
"""
tokenizer = "|".join(colSep)
result = []
if flist == []:
# Use None as a flag to read from STDIN
flist.append(None)
# Test if it is a FilesList or plain list. Upgrade it if it's plain.
# It will have at least one entry for sure by now, either way.
try:
flist.aliases[0]
except AttributeError:
flist = FilesList(flist)
# Parse.
keyhead = None
for f, (myfile, myalias) in flist.enum():
# I used to use the pandas parser, with my own parser used only as fallback
# for problematic cases. As flexibility requirements increased, using the
# pandas parser became too opaque and difficult to maintain,
# so now all cases are delegated to mine.
df = get_columns_manual(myfile, cols=cols, colSep=colSep, header=header,
alias=myalias, index=index)
if not keyhead:
keyhead = df.index.name
result.append(df)
# Merge.
if merge:
result = [pd.concat(result, axis=1, join='outer', ignore_index=False)]
result[0].index.name = keyhead
return result
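# Illustrative sketch (file names and column choices are hypothetical): extract
# column 1 and the closed range 3:5 from each file, keyed on column 0, and merge.
#   flist = FilesList(["counts_A.tsv", "counts_B.tsv"])
#   merged = get_columns(flist, cols=[1, "3:5"], colSep=["\t"],
#                        header=True, index=0, merge=True)[0]
# Output columns are labelled <alias>_|<position> by prepare_df().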
# Helper function
def get_columns_manual(file=None, cols=[0], colSep=["\t"], header=False, index=None, alias=None):
"""Get specified columns from a file where rows have varying numbers of fields.
Some tables contain a fixed set of fields followed by optional fields. In
these rare cases, traditional parsers fail due to inconsistent number of
fields. This function provides a work-around for that.
It is entirely the user's responsibility to ensure that the inconsistent
row lengths are not a symptom of table corruption/malformation and that it
is safe and reliable to extract the desired columns. If a row is shorter
than expected, it is padded with the value "IDXERROR". If this value shows
up in your result and you are not explicitly expecting it, you should stop
and seriously examine your input table.
Args:
file(str): A delimited plain text file.
header(bool): If True, the first non-comment line will not be in
the data. (Default False)
cols[int]: A list of positional indexes of the desired columns.
(Default [0]).
colSep[str]: List of regex strings for field separators.
(Default ["\t"]).
index(int): Position of column to be used as row index. (Default None)
alias(str): An alias for the file. Used for naming the columns.
Returns:
pandas.DataFrame: DataFrame with the columns, labeled by original
column number, ordered as specified.
"""
tokenizer = re.compile("|".join(colSep))
# Input source.
f = None
if file is None:
f = sys.stdin
file = "STDIN"
else:
f = open(file)
if alias is None:
alias = FilesList.autoalias(file)
# Import data.
keyhead = None
values = []
labels = []
for l, line in enumerate(f):
if line[0] == '#' or line == "\n":
# Skip comments and empty lines.
continue
else:
# Get the fields.
fields = tokenizer.split(line.rstrip("\n"))
# Column labels from the first non-comment non-empty row,
# regardless of whether they really are labels or not.
if not labels:
labels = fields
# Find out name of row index.
if (not keyhead) and header and (index is not None):
keyhead = str(fields[index])
# Get columns.
selection = []
expandedcols = []
for c in cols:
v = str(c).split(":")
if len(v) == 1:
try:
expandedcols.append(int(v[0]))
except ValueError:
expandedcols.append(labels.index(v[0]))
else:
try:
expandedcols.extend(list(range(int(v[0]), int(v[1]) + 1)))
except (ValueError, TypeError):
expandedcols.extend(list(range(labels.index(v[0]), labels.index(v[1]) + 1)))
for i in expandedcols:
try:
selection.append(fields[i])
except IndexError:
# Silently adding fields is too dangerous, so a flag value is needed.
# Values like None or NA can sometimes be legitimate values for fields.
selection.append("IDXERROR")
# Add the key at the end, where they won't interfere with column numbers.
if index is not None:
selection.append(fields[index])
values.append(selection)
if f != sys.stdin:
f.close()
# Adjust index of row keys to reflect the fact I stuck them at the end.
if index is not None:
index = len(values[0])-1
expandedcols.append("my_garbage_label_row_key")
# Package data nicely.
df = pd.DataFrame(data=values)
df = prepare_df(df, myalias=alias, keyCol=index, header=header, cols=expandedcols,
keyhead=keyhead, appendNum=True if len(expandedcols)>1 else False)
if index is not None:
df.drop(alias+"_|my_garbage_label_row_key", 1, inplace=True)
return df
def get_random_columns(flist, colSep=["\t"], k=1, header=False, index=None, merge=True):
""" Get k random columns from each file.
The returned columns will be labeled based on the name of the file they
came from and their position in it. Existing labels are optionally
preserved as the top row or can be skipped entirely.
If an index is specified, it will be used for merging (if applicable) and
will be included as a column in each output file.
Args:
flist: A list or FilesList of files.
k(int): How many columns to get.
colSep[str]: A list of characters used as field separators.
(Default ["\t"])
header(bool): Strip column headers. (Default False)
index(int): Column to use as row index for merging. (Default None)
merge(bool): Concatenate results from all files into a single
dataframe. If False, a list of dataframes is returned
instead. (Default True).
Returns:
[pandas.DataFrame]: List of DataFrames. If merge=True, only the
first element will be populated.
"""
tokenizer = "|".join(colSep)
# The files may have different number of columns
fieldNums = count_columns(flist, colSep)
result = []
if flist == []:
# Use None as a flag to read from STDIN
flist.append(None)
keyhead = None
# Test if it is a FilesList or plain list. Upgrade it if it's plain.
# get_columns() does this too, but as I call it per item in flist, I *must*
# preserve any alias that is potentially already specified.
try:
flist.aliases[0]
except AttributeError:
flist = FilesList(flist)
# Get columns.
for f, (myfile, myalias) in flist.enum():
cols = []
if index is not None:
# Generate random choice of columns.
# range() is right-open.
cols.extend(random.sample(list(range(0, fieldNums[f]-1)), k))
else:
cols = random.sample(list(range(0,fieldNums[f])), k)
# Would normally delegate the actual getting to get_columns() but there
# are too many little differences to accommodate that complicate the code
# to the point of defeating any benefits from code re-use.
df = pd.read_csv(myfile, sep=tokenizer, header=None, index_col=None, comment="#", engine='python')
if (not keyhead) and header and (index is not None):
keyhead = str(df.iloc[0,index])
# Adjust row and column labels.
df = prepare_df(df, myalias=myalias, keyCol=index, header=header, keyhead=keyhead,
appendNum=True if k>1 else False)
# Slice the part I need.
df = df.iloc[:,cols]
result.append(df)
# Merge.
if merge:
result = [pd.concat(result, axis=1, join='outer', ignore_index=False)]
result[0].index.name = keyhead
return result
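# Illustrative sketch (files are hypothetical): draw 2 random columns per file
# and merge the picks into one table.
#   picks = get_random_columns(FilesList(["a.tsv", "b.tsv"]), k=2,
#                              colSep=["\t"], header=True, merge=True)[0]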
def append_columns(flist, colSep=["\t"], header=False, index=None):
"""Append all columns from the files, as they are.
The order of the columns in the output follows the order of the columns in
the files and the order of the files. The files don't need to have the same
number of columns. It is your responsibility to ensure sensible consistency
of number and order of rows across files, otherwise the combined data may
be nonsensical.
This function also supports key-aware appending, using outer-join, when a
row index is specified.
Args:
flist: A list/FilesList of files to combine.
colSep[str]: A list of characters used as field delimiters.
(Default ["\t"])
header(bool): First non-comment line as column labels. (Default False)
index(int): Column to use as row index (same in all files).
(Default None)
Returns:
pandas.Dataframe
"""
try:
flist.aliases[0]
except AttributeError:
flist = FilesList(flist)
# Determine how many columns each file has.
numofcols = count_columns(flist, colSep=colSep)
# Delegate fetching all the columns.
data = []
keyhead = None
for f, (myfile, myalias) in flist.enum():
# List the columns and remove the index one from among them.
cols = [i for i in range(0,numofcols[f]) if i != index]
df =get_columns(FilesList(files=[myfile], aliases=[myalias]), cols=cols,
colSep=colSep, header=header, merge=False, index=index)[0]
data.append( df )
# Merge. Row indexes will have been assigned by get_columns(), if applicable.
keyhead = data[0].index.name
result = pd.concat(data, axis=1, join="outer", ignore_index=False)
result.index.name = keyhead
return result
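# Illustrative sketch (files are hypothetical): outer-join the files on column 0
# and keep all remaining columns side by side.
#   combined = append_columns(FilesList(["a.tsv", "b.tsv"]),
#                             colSep=["\t"], header=True, index=0)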
def get_crosspoints(flist, cols=[0], rows=[0], colSep=["\t"], header=False, index=None, merge=True):
""" Get the values at selected rows and columns.
The values at the intersections of the selected rows and columns are extracted.
Args:
flist: A [str] list or fileutilities.FilesList of delimited text files.
colSep[str]: List of column separators.
cols[int]: List of columns.
rows[int]: List of rows.
header(bool): Whether there is a header line (False).
index(int): Which column has the row labels (None).
merge(bool): Merge results into single table (True).
Returns:
[pandas.DataFrame]:
"""
results = get_columns(flist, cols=cols, colSep=colSep, header=header, merge=merge, index=index)
for i in range(0, len(results)):
results[i] = results[i].iloc[rows,:]
return results
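# Illustrative sketch (file is hypothetical): values at the intersections of
# rows 0-2 with columns 1 and 3.
#   pts = get_crosspoints(FilesList(["a.tsv"]), cols=[1, 3], rows=[0, 1, 2],
#                         colSep=["\t"], header=False, merge=False)[0]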
##### C L A S S E S #####
class FilesList(list):
"""A container for a list of files.
An extension of the built-in list, specifically for files, providing a
means to import multiple filenames either from text lists or from
directories. The purpose is to facilitate batch operations and sensible
output of their results.
FilesList is generally backwards compatible with the built-in list and it
should be possible for them to be used interchangeably in most cases. A
plain list can be cast as a FilesList, when necessary, allowing appointment
of default alias values. A FilesList should always work as a plain list
without additional actions (except for its string representation). When a
FilesList is accessed as a plain list, only the full paths will be
accessed. Certain equivalent methods are supplied for
Most basic operations inherited from list are supported. Appending has been
overridden to keep paths and aliases in sync. Sorting, deleting and
inserting of items are currently not supported and will break the
correspondence between paths and aliases.
Attributes defined here:
aliases = [] : Practical aliases for the full file-paths.
"""
def __init__(self, files=None, aliases=None, fromtuples=None):
"""Construct an instance of the FilesList.
A FilesList can be created:
- empty
- from a list of files (with default aliases automatically assigned)
- from a list of files and a list of aliases (in the same order)
- from a list of (file, alias) tuples.
Args:
files[str]: A list of files. (Default None)
aliases[str]: A list of aliases. (Default None)
fromtuples[(str,str)]: A list of tuples (file, alias). (Default
None) If this is specified together with flist and/or
aliases, only the data in fromtuples is used.
"""
# If data is a list of (file, alias) tuples, unpair tuples into lists.
if fromtuples is not None:
data = [list(t) for t in zip(*fromtuples)]
# Any data passed to flist and aliases at method call is discarded.
files = data[0]
aliases = data[1]
# Having aliases without the paths is rather useless.
if aliases:
if not files:
raise ValueError("No files supplied for the aliases.")
else:
# Make None into empty.
aliases = []
# Assign default aliases to be same as files. Expand file paths.
if files is not None:
files = expand_fpaths(files)
if not aliases:
for f in files:
aliases.append(self.autoalias(f))
else:
# If still empty, it was an empty call to the constructor.
files = []
# Create the basic list.
super(FilesList, self).__init__(files)
# Add a plain list attribute for the aliases with default values.
self.aliases = autonumerate(aliases)
def __str__(self):
"""Represent as string.
Overrides built-in list's representation.
Returns:
str
"""
tmp = []
for f, (myfile, myalias) in self.enum():
tmp.append("\t".join([str(f), myfile, myalias]))
tmp.append("")
return "\n".join(tmp)
def to_file(self, outfile=None, mode ='a'):
"""Save list as a text file that can be read back in.
Args:
outfile(str): Output file to write into. If omitted, it only
returns the content as a print-ready string.
(Default None)
mode(str): Append ('a') or overwrite ('w'). (Default 'a')
Returns:
str: A print-ready multi-line string. This is returned even when an
output file is specified and written into.
"""
result = ""
for f, (myfile, myalias) in self.enum():
result += myfile + "\t" + myalias + "\n"
if outfile is not None:
with open(outfile, mode) as out:
out.write(result)
return result
def enum(self):
"""Enumerate as (index, (filepath, filealias)).
Returns:
enumerator"""
return enumerate(zip(self, self.aliases))
def get(self, loc):
"""Access path and alias at specified location as tuple.
Args:
loc[int]: Index of item to get.
Returns:
(str,str): Tuple (path, alias).
"""
return (self[loc], self.aliases[loc])
def append(self, myfile, myalias=None):
"""Appends value to both the paths list and the aliases list.
This method overrides the built-in append() of list. It is backwards
compatible by automatically guessing an alias.
This reduces the risk of the paths and aliases going out of sync due
to items being manually added without updating the aliases.
It is still possible to break the sync by manually adding items to the
aliases.
Args:
myfile(str): File (path will be expanded).
myalias(str): Alias for the file (Default None).
"""
if myfile is not None:
myfile = os.path.abspath(os.path.expanduser(myfile))
super(FilesList, self).append(myfile)
if not myalias:
myalias = self.autoalias(myfile)
self.aliases.append(myalias)
self.aliases = autonumerate(self.aliases)
def populate_from_files(self, myfiles, colSep="\t"):
"""Parse the list of files from one or multiple text files.
Read in multiple lists of files from text and append them to the
FilesList. All paths are automatically expanded and converted to
absolute paths. Because of this, each file may also be given a more
convenient alias. If no alias is given, the filename as supplied is
used as the alias instead. The paths are not checked for existence.
Existing contents of the object are kept and the new contents are
appended.
Input file format (no spaces allowed inside names):
#comment
path1/file1 alias1-prefix alias1-suffix1 alias1-suffix2
path1/file2 alias1-prefix alias1-suffix3
path2/file3 alias3
path3/file4
Args:
file[str]: A list of text files each containing a list of files.
colSep(str): Column separator. (Default "\\t")
Returns:
FilesList: Returns self, to facilitate instantiation shortcuts.
"""
# Read new list.
paths = []
for myfile in myfiles:
with open(myfile, 'rU') as input:
for line in input:
if line == "\n":
# Skip empty lines.
continue
elif line[0] == '#':
# Skip comments.
continue
else:
fields = line.rstrip().split(colSep)
paths.append(fields[0])
# Store the alias for the file.
if len(fields) > 1:
self.aliases.append("_".join(fields[1:]))
# If an alias was not specified, re-use the filepath given.
else:
self.aliases.append(self.autoalias(fields[0]))
# Expand to absolute paths and add to main self list.
self.extend(expand_fpaths(paths))
self.aliases = autonumerate(self.aliases)
return self
def populate_from_directories(self, dirpaths, patterns=None):
"""Get files based on naming patterns from within a list of directories.
Useful for selecting among files that follow a naming convention. The
convention is represented by a list of regex strings, at least one of
which has to match.
File paths will be expanded automatically. The filenames will be used
as the aliases.
Existing contents of the object are kept and the new contents are
appended.
Args:
dirpaths[str]: A list/FilesList of paths to directories from where
to get files.
patterns[str]: A list of regex strings. Only files matching at least
one of these will be returned. The patterns will be
matched anywhere in the filenames.
Returns:
FilesList: Returns self, to facilitate instantiation shortcuts.
"""
rx = []
if patterns:
rx = [re.compile(p) for p in patterns]
cwd = os.getcwd()
for d in dirpaths:
try:
os.chdir(d)
for f in os.listdir('./'):
if f in ["","\n",".",".."]:
continue
if not patterns:
# No filter.
self.append(f, self.autoalias(f))
else:
for p in rx:
if p.search(f):
self.append(f, self.autoalias(f))
break
finally:
# Ensure return to original directory to prevent errors in
# external code that uses the class.
os.chdir(cwd)
self.aliases = autonumerate(self.aliases)
return self.sorted()
# Helper function.
@staticmethod
def autoalias(pathname):
"""Strip a path to the base filename."""
if pathname is None:
return None
else:
return os.path.splitext(os.path.basename(pathname))[0]
def sorted(self):
"""Sorted copy.
Returns:
FilesList
"""
d = dict()
for i, (myfile, myalias) in self.enum():
d[myfile] = myalias
sk = natural_sorted(list(d.keys()))
newFL = FilesList()
for k in sk:
newFL.append(k, d[k])
return newFL
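# Illustrative usage sketch for FilesList (paths and aliases are hypothetical):
#   fl = FilesList(["/data/a.txt", "/data/b.txt"], aliases=["A", "B"])
#   for i, (path, alias) in fl.enum():
#       print(i, path, alias)
# The same list can also be built with FilesList(fromtuples=[(path, alias), ...]).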
# Dispatch for similar functions.
_funcDispatch = {"dont_exist" : dont_exist,
"arent_access" : arent_readable,
"are_empty" : are_empty, # Doesn't make sense from command line
# because of lack of control of non-existence value.
"arent_text" : arent_text
}
def store_metadata(flist, numoflines):
"""Store top lines of files into dictionary.
Args:
flist: A list or FilesList.
numoflines(int): Number of lines to save.
Returns:
dict[]: The items of flist[] are used as keys.
"""
metadata = dict()
for myfile in flist:
if myfile is None:
fin = sys.stdin
else:
fin = open(myfile)
lines = []
for i in range(0, numoflines):
lines.append(fin.readline())
metadata[myfile] = "".join(lines)
if fin != sys.stdin:
fin.close()
return metadata
##### M A I N #####
def main(args):
"""Provide command-line access to the module's functionality.
The functionality and format of main is subject to change, as the module
expands and evolves to suit my needs. Main() is not intended to be called
from within other code.
Optional short info is printed in commented lines. The info always
*succeeds* the relevant output, rather than precede it. This serves as
confirmation that the task completed. Calling details of the script are
recorded in commented lines at the top of the output.
For more info on the functionality, read the above documentation of the
classes and functions. For usage syntax, execute the module with the -h
argument.
"""
# Organize arguments and usage help:
parser = argparse.ArgumentParser(description="Provide INPUTTYPE and TARGETs \
*before* providing any of the other parameters. This is due to many \
parameters accepting an indefinite number of values. Only one task at a time.")
# Input/Output.
parser.add_argument('INPUTTYPE', type=str, choices=['L','T','D','P'],
help=" Specify the type of the TARGETs: \
'T' = The actual input files. \
'L' = Text file(s) listing the input files. \
'P' = Get list of input files from STDIN pipe. \
'D' = Input data directly from STDIN pipe. \
('D' is compatible with only some of the functions)")
parser.add_argument('TARGET', type=str, nargs='*',
help=" The targets, space- or comma-separated. Usually files. \
Look into the specific task details below for special uses. \
Do not specify with INPUTTYPE 'P' or 'D'.")
parser.add_argument('-O','--out', type=str, nargs=3,
help=" Send individual outputs to individual files instead of \
merging them to STDOUT. Output files will be like \
<out[0]>/<out[1]>target<out[2]>")
# Parameters.
parser.add_argument('-L','--log', action='store_true',
help=" Log this command to ./commands.log.")
parser.add_argument('-c','--comments', action='store_true',
help=" Include commented info to STDOUT or files. (Default don't include)")
parser.add_argument('-C','--STDERRcomments', action="store_false",
help=" Do NOT show info in STDERR. (Default show)")
parser.add_argument('-s','--sep', type=str, default=["\t"], nargs='+',
help=" A list of input field separators. The first value \
will be used for all output. (Default \\t, bash syntax for tab: $'\\t').")
parser.add_argument('-l','--labels', action='store_true',
help=" Discard column headers (first content line) in input files. (Default do not discard)")
parser.add_argument('-r','--relabel', action='store_false',
help=" Do NOT create new column headers that reflect the origin of the columns. (Default create)")
parser.add_argument('-i','--index', action='store_true',
help=" Use column 0 as row index. The index will always be included in the output. (Default no index)")
parser.add_argument('-M','--metadata', type=int, default=0,
help=" Number of metadata lines at the \
beginning of input data (Default 0). Metadata will be read separately \
and re-added verbatim into the output.")
# General tasks.
parser.add_argument('--probe', type=str, choices=list(_funcDispatch.keys()),
help=" Do one of a these simple checks on the target files.")
parser.add_argument('--dir', type=str, nargs='*',
help=" List the contents of the target paths. \
Full absolute file paths are returned. Each file is also given an alias. \
Supplying an optional list of regex patterns enables filtering of the result.")
parser.add_argument('--link', type=str, nargs='+',
help=" Create symbolic links for the targets into the specified directory. \
Any additional values are used as respective names for the links, one for one, \
otherwise the aliases or basenames will be used, enumerated when necessary.")
parser.add_argument('--loop', type=str, nargs='+',
help=" Repeat the specified shell command for each target value. \
The first value of this parameter determines what the target values are: \
'S'= strings: paths/files/strings, 'R'= range: ranges of positive integers in x:y format. \
Target PLACEHOLDERS: ***full***, ***path***, ***file***, \
***core***, ***alias***. \
If looping over a NUMERICAL RANGE use any of the last 3 placeholders. \
The nested command should be supplied as a list of components, not as a single string. \
Options intended for the nested command should be preceded \
by a '+' sign like this: '+-v'.")
# Delimited file tasks.
parser.add_argument('--swap', type=str,
help=" Replace all occurrences of the --sep values with the value supplied here.\
** Bash syntax for tab: $'\\t'. Compatible with 'D' as INPUTTYPE.")
parser.add_argument('--cntcols', action='store_true',
help="Count the number of fields in the first row of each target file.")
parser.add_argument('--cols', nargs='+',
help="Extract the specified columns (named or 0-indexed) from each target. \
Column ranges in x:y format closed at both ends. \
Negative indices must be escaped first: \-1. Compatible with 'D' as INPUTTYPE.")
parser.add_argument('--rndcols', type=int,
help="Randomly select this many columns from the target files. \
With --index, the index column will not be part of the random selection.")
parser.add_argument('--appnd', action='store_true',
help="Append all the columns of the target files into a single table.")
parser.add_argument('--valset', nargs=3,
help="Get the non-redundant set of values in the given row/column. \
Takes three arguments: (i) orientation 'r' for row or 'c' for column, \
(ii) position index of the row/column, (iii) repetition filter: \
'a' all values, 'u' unique values only, 'r' only values with two or more instances.")
params = parser.parse_args(args)
# INPUT ###################################################################
targets = []
for t in params.TARGET:
v = t.split(",")
if len(v) == 1:
targets.append(t)
else:
targets.extend(v)
flist = None
if params.INPUTTYPE == 'P':
# Read files list from STDIN
flist = FilesList()
for line in sys.stdin:
fields = line.rstrip("\n").split("\t")
if fields[0] != "":
try:
flist.append(fields[0], fields[1])
except IndexError:
flist.append(fields[0])
elif params.INPUTTYPE == 'L':
# Create the FilesList, by appending the contents of all provided lists.
flist = FilesList().populate_from_files(targets)
elif params.INPUTTYPE == 'T':
# Create the FilesList by supplying a direct list of files.
flist = FilesList(targets)
elif params.INPUTTYPE == 'D':
# Data will be read from STDIN. No files needed. Make an empty list.
# Not all functions will switch to STDIN given this. Several will simply do nothing.
flist = FilesList()
else:
sys.exit(ml.errstring("Unknown INPUTTYPE."))
# Metadata. ---------------------------------------------------------------
metadata = ""
if params.metadata:
metadata = store_metadata(flist, params.metadata)
# OUTPUT ##################################################################
outdir, outpref, outsuff = None, None, None
if params.out:
outdir = expand_fpaths([params.out[0]])[0]
outpref = params.out[1]
outsuff = params.out[2]
# CALL DETAILS ############################################################
if params.log:
ml.log_command()
if params.STDERRcomments:
sys.stderr.write(ml.paramstring())
# TASKS ###################################################################
# Simple file PROBEs. -----------------------------------------------------
if params.probe:
result = _funcDispatch[params.probe](flist)
try:
if params.comments:
sys.stdout.write(ml.paramstring())
print(result.to_file())
if params.STDERRcomments:
sys.stderr.write(ml.donestring("probing"))
except IOError:
pass
# Filter DIRECTORY contents. ----------------------------------------------
elif params.dir is not None:
result = FilesList().populate_from_directories(flist, params.dir)
try:
if params.comments:
sys.stdout.write(ml.paramstring())
sys.stdout.write(result.to_file())
if params.STDERRcomments:
sys.stderr.write(ml.donestring("listing"))
except IOError:
pass
# LOOP arbitrary command. -------------------------------------------------
elif params.loop:
if params.loop[0] == 'R':
# Generate the range.
myrange = []
for t in targets: # Look for multiple ranges.
v = t.split(":")
if len(v) > 1:
myrange.extend(list(range(int(v[0]), int(v[1]) + 1)))
else:
sys.exit(ml.errstring("No numeric ranges specified. Use -h for help with the newest syntax."))
flist = FilesList(myrange)
# Strip left and/or right padding first.
command = []
for c in params.loop[1:]:
command.append(c.lstrip("+"))
try:
do_foreach(flist, command, out=(outdir, outpref, outsuff),
progress=(params.STDERRcomments), comments=params.comments,
log=params.log)
if params.STDERRcomments:
sys.stderr.write(ml.donestring("looping-"+ params.loop[0]))
except IOError:
pass
# Symbolic LINKS. ---------------------------------------------------------
elif params.link:
slink(flist, dir=params.link[0], aliases=params.link[1:])
if params.STDERRcomments:
sys.stderr.write(ml.donestring("linking"))
# SWAP substrings. --------------------------------------------------------
elif params.swap is not None:
result = swap_strFiles(flist, insep=params.sep, outsep=params.swap)
# Create output filenames, if applicable. If [], then STDOUT.
outfiles = make_names(flist.aliases, (outdir, outpref, outsuff))
outstream = sys.stdout
# I need the for loop to iterate at least once. Relevant for STDIN input, since I have no input files listed then.
if flist == []:
flist.append("<STDIN>")
# Print the converted data.
for i, (myfile, myalias) in flist.enum():
if outfiles:
# Send to individual file instead of STDOUT.
outstream = open(outfiles[i], 'w')
try:
if params.comments:
# Embed call info at beginning of output. More useful there when outputting to files.
outstream.write(ml.paramstring("SOURCE: " + myfile))
outstream.write(result[i].rstrip("\n") +"\n")
except IOError:
pass
finally:
if outfiles:
# Don't want to accidentally close STDOUT.
outstream.close()
if params.STDERRcomments:
try:
sys.stderr.write(ml.donestring("swapping delimiters"))
except IOError:
pass
# Get COLUMNS or RANDOM columns. (most code shared) -----------------------
elif params.cols or params.rndcols:
# Create output filenames, if applicable. If [], then STDOUT.
outfiles = make_names(flist.aliases, (outdir, outpref, outsuff))
outstream = sys.stdout
merge = False if outfiles else True
# Determine if using index, and assign appropriate value.
idx = None
if params.index:
idx = 0
else:
idx = None
# Extract data.
result = None
if params.cols:
cols = []
for p in params.cols: # space separated arguments
cols.extend(p.split(",")) # comma separated arguments
# Get the specified columns.
result = get_columns(flist, cols=cols, colSep=params.sep,
header=params.labels, merge=merge, index=idx)
else:
# Get random columns.
result = get_random_columns(flist, k=params.rndcols, colSep=params.sep,
header=params.labels, merge=merge, index=idx)
# I need the for loop to iterate at least once. Relevant for STDIN input, since I have no input files listed then.
if flist == []:
flist.append("<STDIN>")
if merge:
try:
if params.comments:
# Embed call info at beginning of output.
outstream.write(ml.paramstring("SOURCE: " + myfile))
if params.metadata:
# Dump all the metadata from all the merged input sources.
for i, (myfile, myalias) in flist.enum():
outstream.write(metadata[myfile])
outstream.write( result[0].to_csv(header=params.relabel, index=params.index, sep=params.sep[0]))
except IOError:
pass
else:
for i, (myfile, myalias) in flist.enum():
outstream = open(outfiles[i], 'w')
try:
if params.comments:
# Embed call info at beginning of output.
outstream.write(ml.paramstring("SOURCE: " + myfile))
if params.metadata:
outstream.write(metadata[myfile])
outstream.write( result[i].to_csv(header=params.relabel, index=params.index, sep=params.sep[0]))
except IOError:
pass
finally:
outstream.close()
if params.STDERRcomments:
try:
if params.cols:
sys.stderr.write(ml.donestring("getting columns, index "+ str(idx is not None)))
else:
sys.stderr.write(ml.donestring("getting random columns, index "+ str(idx is not None)))
except IOError:
pass
# APPEND columns. ---------------------------------------------------------
elif params.appnd:
idx = None
if params.index:
idx = 0
df = append_columns(flist, colSep=params.sep, header=params.labels, index=idx)
try:
if params.comments:
sys.stdout.write(ml.paramstring())
if params.metadata:
# Dump all the metadata from all the merged input sources.
for i, (myfile, myalias) in flist.enum():
sys.stdout.write(metadata[myfile])
sys.stdout.write(df.to_csv(sep=params.sep[0], header=params.relabel, index=params.index))
if params.STDERRcomments:
sys.stderr.write(ml.donestring("appending columns, index "+ str(idx is not None)))
except IOError:
pass
# COUNT columns. ----------------------------------------------------------
elif params.cntcols:
result = count_columns(flist, params.sep)
try:
if params.comments:
sys.stdout.write(ml.paramstring())
for f, (myfile, myalias) in flist.enum():
print("\t".join([str(result[f]), myalias, myfile]))
if params.STDERRcomments:
sys.stderr.write(ml.donestring("counting columns"))
except IOError:
pass
# SET of values in row/column. --------------------------------------------
elif params.valset:
nest = get_valuesSet(flist, axis=params.valset[0], index=params.valset[1], filter=params.valset[2], colSep=params.sep)
try:
if params.comments:
sys.stdout.write(ml.paramstring())
for f, (myfile, myalias) in flist.enum():
print("".join([myfile, "\t", str(nest[f])]))
if params.STDERRcomments:
sys.stderr.write(ml.donestring("obtaining set of values."))
except IOError:
pass
# # All done.
# if params.STDERRcomments:
# sys.stderr.write(ml.donestring())
##### E X E C U T I O N #####
# Call main only if the module was executed directly.
if __name__ == "__main__":
main(sys.argv[1:])
sys.exit(0)
#EOF | mit |
johannfaouzi/pyts | pyts/transformation/bag_of_patterns.py | 1 | 7803 | """Code for Bag-of-patterns representation for time series."""
# Author: Johann Faouzi <[email protected]>
# License: BSD-3-Clause
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils.validation import check_array, check_is_fitted
from ..bag_of_words import BagOfWords
from ..base import UnivariateTransformerMixin
class BagOfPatterns(BaseEstimator, UnivariateTransformerMixin):
"""Bag-of-patterns representation for time series.
This algorithm uses a sliding window to extract subsequences from the
time series and transforms each subsequence into a word using the
Piecewise Aggregate Approximation and the Symbolic Aggregate approXimation
algorithms. Thus it transforms each time series into a bag of words.
Then it derives the frequencies of each word for each time series.
Parameters
----------
window_size : int or float (default = 0.5)
Length of the sliding window. If float, it represents
a percentage of the size of each time series and must be
between 0 and 1.
word_size : int or float (default = 0.5)
Length of the words. If float, it represents
a percentage of the length of the sliding window and must be
between 0. and 1.
n_bins : int (default = 4)
The number of bins to produce. It must be between 2 and
``min(window_size, 26)``.
strategy : 'uniform', 'quantile' or 'normal' (default = 'normal')
Strategy used to define the widths of the bins:
- 'uniform': All bins in each sample have identical widths
- 'quantile': All bins in each sample have the same number of points
- 'normal': Bin edges are quantiles from a standard normal distribution
numerosity_reduction : bool (default = True)
        If True, delete sample-wise all but one occurrence of back-to-back
        identical occurrences of the same word.
window_step : int or float (default = 1)
Step of the sliding window. If float, it represents the percentage of
the size of each time series and must be between 0 and 1. The step of
sliding window will be computed as
``ceil(window_step * n_timestamps)``.
norm_mean : bool (default = True)
If True, center each subseries before scaling.
norm_std : bool (default = True)
If True, scale each subseries to unit variance.
sparse : bool (default = True)
Return a sparse matrix if True, else return an array.
overlapping : bool (default = True)
If True, time points may belong to two bins when decreasing the size
of the subsequence with the Piecewise Aggregate Approximation
        algorithm. If False, each time point belongs to a single bin, but
the size of the bins may vary.
alphabet : None or array-like, shape = (n_bins,)
Alphabet to use. If None, the first `n_bins` letters of the Latin
alphabet are used.
Attributes
----------
vocabulary_ : dict
A mapping of feature indices to terms.
References
----------
.. [1] J. Lin, R. Khade and Y. Li, "Rotation-invariant similarity in time
series using bag-of-patterns representation". Journal of Intelligent
Information Systems, 39 (2), 287-315 (2012).
Examples
--------
>>> import numpy as np
>>> from pyts.transformation import BagOfPatterns
>>> X = np.arange(12).reshape(2, 6)
>>> bop = BagOfPatterns(window_size=4, word_size=4, sparse=False)
>>> bop.fit_transform(X)
array(...)
>>> bop.set_params(numerosity_reduction=False)
BagOfPatterns(...)
>>> bop.fit_transform(X)
array(...)
"""
def __init__(self, window_size=0.5, word_size=0.5, n_bins=4,
strategy='normal', numerosity_reduction=True, window_step=1,
norm_mean=True, norm_std=True, sparse=True, overlapping=True,
alphabet=None):
self.window_size = window_size
self.word_size = word_size
self.n_bins = n_bins
self.strategy = strategy
self.numerosity_reduction = numerosity_reduction
self.window_step = window_step
self.norm_mean = norm_mean
self.norm_std = norm_std
self.sparse = sparse
self.overlapping = overlapping
self.alphabet = alphabet
def fit(self, X, y=None):
"""Learn the dictionary.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Input data
y
Ignored
Returns
-------
self : object
"""
# Transform each time series into a bag of words
bow = BagOfWords(
window_size=self.window_size, word_size=self.word_size,
n_bins=self.n_bins, strategy=self.strategy,
numerosity_reduction=self.numerosity_reduction,
window_step=self.window_step, norm_mean=self.norm_mean,
norm_std=self.norm_std, overlapping=self.overlapping,
alphabet=self.alphabet
)
X_bow = bow.transform(X)
# Learn the vocabulary
vectorizer = CountVectorizer()
vectorizer.fit(X_bow)
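        # CountVectorizer's own vocabulary_ maps each term to a column index;
        # invert it here so that this estimator's vocabulary_ maps a feature
        # index back to its word, as documented in the class docstring.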
self.vocabulary_ = {value: key for key, value in
vectorizer.vocabulary_.items()}
self._vectorizer = vectorizer
return self
def transform(self, X):
"""Derive word frequencies for each time series.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Data to transform.
Returns
-------
X_new : array, shape = (n_samples, n_words)
Word frequencies.
"""
X = check_array(X, dtype='float64')
check_is_fitted(self, 'vocabulary_')
# Transform each time series into a bag of words
bow = BagOfWords(
window_size=self.window_size, word_size=self.word_size,
n_bins=self.n_bins, strategy=self.strategy,
numerosity_reduction=self.numerosity_reduction,
window_step=self.window_step, norm_mean=self.norm_mean,
norm_std=self.norm_std, overlapping=self.overlapping,
alphabet=self.alphabet
)
X_bow = bow.transform(X)
# Derive frequencies for each word in the vocabulary
X_bop = self._vectorizer.transform(X_bow)
if not self.sparse:
return X_bop.A
return csr_matrix(X_bop)
def fit_transform(self, X, y=None):
"""Derive word frequencies for each time series.
Parameters
----------
X : array-like, shape = (n_samples, n_timestamps)
Data to transform.
y
Ignored
Returns
-------
X_new : array, shape = (n_samples, n_words)
Word frequencies.
"""
# Transform each time series into a bag of words
bow = BagOfWords(
window_size=self.window_size, word_size=self.word_size,
n_bins=self.n_bins, strategy=self.strategy,
numerosity_reduction=self.numerosity_reduction,
window_step=self.window_step, norm_mean=self.norm_mean,
norm_std=self.norm_std, overlapping=self.overlapping,
alphabet=self.alphabet
)
X_bow = bow.transform(X)
# Derive frequencies of each word
vectorizer = CountVectorizer()
X_bop = vectorizer.fit_transform(X_bow)
self.vocabulary_ = {value: key for key, value in
vectorizer.vocabulary_.items()}
self._vectorizer = vectorizer
if not self.sparse:
return X_bop.A
return csr_matrix(X_bop)
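# Illustrative usage sketch (added for clarity; not part of the original pyts
# module): it exercises the pipeline described above -- sliding window ->
# words -> word frequencies -- on random data, and only runs when the file is
# executed directly, so importing the module is unaffected.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(42)
    X_demo = rng.randn(4, 64)  # 4 toy time series of length 64
    bop = BagOfPatterns(window_size=16, word_size=4, n_bins=3, sparse=False)
    X_freq = bop.fit_transform(X_demo)
    # One row per time series, one column per word in the learned vocabulary.
    assert X_freq.shape == (4, len(bop.vocabulary_))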
| bsd-3-clause |
otmaneJai/Zipline | tests/finance/test_slippage.py | 32 | 18400 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for finance.slippage
"""
import datetime
import pytz
from unittest import TestCase
from nose_parameterized import parameterized
import pandas as pd
from zipline.finance.slippage import VolumeShareSlippage
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.finance.blotter import Order
class SlippageTestCase(TestCase):
def test_volume_share_slippage(self):
event = Event(
{'volume': 200,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0}
)
slippage_model = VolumeShareSlippage()
open_orders = [
Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
sid=133)
]
orders_txns = list(slippage_model.simulate(
event,
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.01875),
'dt': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': int(50),
'sid': int(133),
'commission': None,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
        # TODO: Make expected_txn a Transaction object and ensure there
# is a __eq__ for that class.
self.assertEquals(expected_txn, txn.__dict__)
def test_orders_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
txn = orders_txns[0][1]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133),
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
expected_txn = {}
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
STOP_ORDER_CASES = {
# Stop orders can be long/short and have their price greater or
# less than the stop.
#
# A stop being reached is conditional on the order direction.
# Long orders reach the stop when the price is greater than the stop.
# Short orders reach the stop when the price is less than the stop.
#
# Which leads to the following 4 cases:
#
# | long | short |
# | price > stop | | |
# | price < stop | | |
#
# Currently the slippage module acts according to the following table,
# where 'X' represents triggering a transaction
# | long | short |
# | price > stop | | X |
# | price < stop | X | |
#
# However, the following behavior *should* be followed.
#
# | long | short |
# | price > stop | X | |
# | price < stop | | X |
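    # (A small predicate sketch of this intended rule is added just after this
    # dict, below.)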
'long | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 4.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 4.0,
'open': 3.5
},
'expected': {
'transaction': {
'price': 4.001,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': 100,
'sid': 133,
}
}
},
'long | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.6
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 4.0
},
'expected': {
'transaction': None
}
},
'short | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.4
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 3.0
},
'expected': {
'transaction': None
}
},
'short | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.0,
'open': 3.0
},
'expected': {
'transaction': {
'price': 2.99925,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': -100,
'sid': 133,
}
}
},
}
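    # Sketch added for illustration (not used by the parameterized test below):
    # the intended trigger rule from the tables above, which the expected
    # values in STOP_ORDER_CASES also encode.
    @staticmethod
    def _stop_should_trigger(amount, price, stop):
        # Long orders (amount > 0) trigger once price rises above the stop;
        # short orders (amount < 0) trigger once it falls below the stop.
        return price > stop if amount > 0 else price < stop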
@parameterized.expand([
(name, case['order'], case['event'], case['expected'])
for name, case in STOP_ORDER_CASES.items()
])
def test_orders_stop(self, name, order_data, event_data, expected):
order = Order(**order_data)
event = Event(initial_values=event_data)
slippage_model = VolumeShareSlippage()
try:
_, txn = next(slippage_model.simulate(event, [order]))
except StopIteration:
txn = None
if expected['transaction'] is None:
self.assertIsNone(txn)
else:
self.assertIsNotNone(txn)
for key, value in expected['transaction'].items():
self.assertEquals(value, txn[key])
def test_orders_stop_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.0})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 4.0})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
def gen_trades(self):
# create a sequence of trades
events = [
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 4.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 4.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'open': 3.5
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'open': 4.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'open': 3.5
})
]
return events
| apache-2.0 |
zihua/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 43 | 39945 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
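# Hypothetical variant (not used by any test here) illustrating the same
# monitor API: fit() calls monitor(i, est, locals) after each boosting stage
# and stops early as soon as the monitor returns True.
def train_loss_plateau_monitor(i, est, locals):
    """Stops boosting once the training loss no longer improves."""
    return i > 0 and est.train_score_[i] >= est.train_score_[i - 1]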
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
farodin91/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # convert time from ns to ms and energy from uJ to J
        # (matching the axis labels set below)
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
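# A minimal worked example of hb_energy_times_to_power (hypothetical numbers; the
# actual units depend on how the Heartbeat logs were recorded). With an energy
# delta of 1000 and a time delta of 2000, the /1000.0 scaling gives 1000 / 2.0:
#
#   >>> import numpy as np
#   >>> hb_energy_times_to_power(np.array([0]), np.array([1000]),
#   ...                          np.array([0]), np.array([2000]))
#   array([ 500.])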
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
    Return: (profiler name, [start times], [end times], [start energies], [end energies])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory to process - contains the characterization summaries for each configuration and trial
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
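# Shape of the returned value (hypothetical names/numbers), one triple per metric:
#   (('cfg_a', 'trial_2', 12.3), ('cfg_b', 'trial_1', 4567), ('cfg_b', 'trial_1', 3.7))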
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/datasets/svmlight_format.py | 1 | 16064 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features and is hence suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
import io
import os.path
from contextlib import closing
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from ..externals.six.moves import range, zip
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and is hence suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
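# A minimal usage sketch (hypothetical file names): loading a train/test pair in a
# single call guarantees that both matrices end up with the same number of columns,
# which is the main reason to prefer this helper over two load_svmlight_file calls.
#
#   X_train, y_train, X_test, y_test = load_svmlight_files(
#       ("svmlight.train", "svmlight.test"))
#   assert X_train.shape[1] == X_test.shape[1]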
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and is hence suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
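# A minimal round-trip sketch (hypothetical path; numpy is imported as np at the top
# of this module): dump a small dense matrix and read it back with
# load_svmlight_file; zero entries are simply not written to the file.
#
#   X = np.array([[1.0, 0.0, 2.0], [0.0, 3.0, 0.0]])
#   y = np.array([0, 1])
#   dump_svmlight_file(X, y, "tiny.svmlight", zero_based=True)
#   X2, y2 = load_svmlight_file("tiny.svmlight", zero_based=True)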
| mit |
gfyoung/pandas | pandas/tests/frame/conftest.py | 1 | 8564 | from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, NaT, date_range
import pandas._testing as tm
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
@pytest.fixture
def float_frame_with_na():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
A B C D
ABwBzA0ljw -1.128865 -0.897161 0.046603 0.274997
DJiRzmbyQF 0.728869 0.233502 0.722431 -0.890872
neMgPD5UBF 0.486072 -1.027393 -0.031553 1.449522
0yWA4n8VeX -1.937191 -1.142531 0.805215 -0.462018
3slYUbbqU1 0.153260 1.164691 1.489795 -0.545826
soujjZ0A08 NaN NaN NaN NaN
7W6NLGsjB9 NaN NaN NaN NaN
... ... ... ... ...
uhfeaNkCR1 -0.231210 -0.340472 0.244717 -0.901590
n6p7GYuBIV -0.419052 1.922721 -0.125361 -0.727717
ZhzAeY6p1y 1.234374 -1.425359 -0.827038 -0.633189
uWdPsORyUh 0.046738 -0.980445 -1.102965 0.605503
3DJA6aN590 -0.091018 -1.684734 -1.100900 0.215947
2GBPAzdbMk -2.883405 -1.021071 1.209877 1.633083
sHadBoyVHw -2.223032 -0.326384 0.258931 0.245517
[30 rows x 4 columns]
"""
df = DataFrame(tm.getSeriesData())
# set some NAs
df.iloc[5:10] = np.nan
df.iloc[15:20, -2:] = np.nan
return df
@pytest.fixture
def bool_frame_with_na():
"""
Fixture for DataFrame of booleans with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
A B C D
zBZxY2IDGd False False False False
IhBWBMWllt False True True True
ctjdvZSR6R True False True True
AVTujptmxb False True False True
G9lrImrSWq False False False True
sFFwdIUfz2 NaN NaN NaN NaN
s15ptEJnRb NaN NaN NaN NaN
... ... ... ... ...
UW41KkDyZ4 True True False False
l9l6XkOdqV True False False False
X2MeZfzDYA False True False False
xWkIKU7vfX False True False True
QOhL6VmpGU False False False True
22PwkRJdat False True False False
kfboQ3VeIK True False True False
[30 rows x 4 columns]
"""
df = DataFrame(tm.getSeriesData()) > 0
df = df.astype(object)
# set some NAs
df.iloc[5:10] = np.nan
df.iloc[15:20, -2:] = np.nan
# For `any` tests we need to have at least one True before the first NaN
# in each column
for i in range(4):
df.iloc[i, i] = True
return df
@pytest.fixture
def float_string_frame():
"""
Fixture for DataFrame of floats and strings with index of unique strings
Columns are ['A', 'B', 'C', 'D', 'foo'].
A B C D foo
w3orJvq07g -1.594062 -1.084273 -1.252457 0.356460 bar
PeukuVdmz2 0.109855 -0.955086 -0.809485 0.409747 bar
ahp2KvwiM8 -1.533729 -0.142519 -0.154666 1.302623 bar
3WSJ7BUCGd 2.484964 0.213829 0.034778 -2.327831 bar
khdAmufk0U -0.193480 -0.743518 -0.077987 0.153646 bar
LE2DZiFlrE -0.193566 -1.343194 -0.107321 0.959978 bar
HJXSJhVn7b 0.142590 1.257603 -0.659409 -0.223844 bar
... ... ... ... ... ...
9a1Vypttgw -1.316394 1.601354 0.173596 1.213196 bar
h5d1gVFbEy 0.609475 1.106738 -0.155271 0.294630 bar
mK9LsTQG92 1.303613 0.857040 -1.019153 0.369468 bar
oOLksd9gKH 0.558219 -0.134491 -0.289869 -0.951033 bar
9jgoOjKyHg 0.058270 -0.496110 -0.413212 -0.852659 bar
jZLDHclHAO 0.096298 1.267510 0.549206 -0.005235 bar
lR0nxDp1C2 -2.119350 -0.794384 0.544118 0.145849 bar
[30 rows x 5 columns]
"""
df = DataFrame(tm.getSeriesData())
df["foo"] = "bar"
return df
@pytest.fixture
def mixed_float_frame():
"""
Fixture for DataFrame of different float types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
GI7bbDaEZe -0.237908 -0.246225 -0.468506 0.752993
KGp9mFepzA -1.140809 -0.644046 -1.225586 0.801588
VeVYLAb1l2 -1.154013 -1.677615 0.690430 -0.003731
kmPME4WKhO 0.979578 0.998274 -0.776367 0.897607
CPyopdXTiz 0.048119 -0.257174 0.836426 0.111266
0kJZQndAj0 0.274357 -0.281135 -0.344238 0.834541
tqdwQsaHG8 -0.979716 -0.519897 0.582031 0.144710
... ... ... ... ...
7FhZTWILQj -2.906357 1.261039 -0.780273 -0.537237
4pUDPM4eGq -2.042512 -0.464382 -0.382080 1.132612
B8dUgUzwTi -1.506637 -0.364435 1.087891 0.297653
hErlVYjVv9 1.477453 -0.495515 -0.713867 1.438427
1BKN3o7YLs 0.127535 -0.349812 -0.881836 0.489827
9S4Ekn7zga 1.445518 -2.095149 0.031982 0.373204
xN1dNn6OV6 1.425017 -0.983995 -0.363281 -0.224502
[30 rows x 4 columns]
"""
df = DataFrame(tm.getSeriesData())
df.A = df.A.astype("float32")
df.B = df.B.astype("float32")
df.C = df.C.astype("float16")
df.D = df.D.astype("float64")
return df
@pytest.fixture
def mixed_int_frame():
"""
Fixture for DataFrame of different int types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
mUrCZ67juP 0 1 2 2
rw99ACYaKS 0 1 0 0
7QsEcpaaVU 0 1 1 1
xkrimI2pcE 0 1 0 0
dz01SuzoS8 0 1 255 255
ccQkqOHX75 -1 1 0 0
DN0iXaoDLd 0 1 0 0
... .. .. ... ...
Dfb141wAaQ 1 1 254 254
IPD8eQOVu5 0 1 0 0
CcaKulsCmv 0 1 0 0
rIBa8gu7E5 0 1 0 0
RP6peZmh5o 0 1 1 1
NMb9pipQWQ 0 1 0 0
PqgbJEzjib 0 1 3 3
[30 rows x 4 columns]
"""
df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})
df.A = df.A.astype("int32")
df.B = np.ones(len(df.B), dtype="uint64")
df.C = df.C.astype("uint8")
df.D = df.C.astype("int64")
return df
@pytest.fixture
def timezone_frame():
"""
Fixture for DataFrame of date_range Series with different time zones
Columns are ['A', 'B', 'C']; some entries are missing
A B C
0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00
1 2013-01-02 NaT NaT
2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00
"""
df = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
df.iloc[1, 1] = NaT
df.iloc[1, 2] = NaT
return df
@pytest.fixture
def uint64_frame():
"""
Fixture for DataFrame with uint64 values
Columns are ['A', 'B']
"""
return DataFrame(
{"A": np.arange(3), "B": [2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10]}, dtype=np.uint64
)
@pytest.fixture
def simple_frame():
"""
Fixture for simple 3x3 DataFrame
Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c'].
one two three
a 1.0 2.0 3.0
b 4.0 5.0 6.0
c 7.0 8.0 9.0
"""
arr = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
return DataFrame(arr, columns=["one", "two", "three"], index=["a", "b", "c"])
@pytest.fixture
def frame_of_index_cols():
"""
Fixture for DataFrame of columns that can be used for indexing
Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')];
'A' & 'B' contain duplicates (but are jointly unique), the rest are unique.
A B C D E (tuple, as, label)
0 foo one a 0.608477 -0.012500 -1.664297
1 foo two b -0.633460 0.249614 -0.364411
2 foo three c 0.615256 2.154968 -0.834666
3 bar one d 0.234246 1.085675 0.718445
4 bar two e 0.533841 -0.005702 -3.533912
"""
df = DataFrame(
{
"A": ["foo", "foo", "foo", "bar", "bar"],
"B": ["one", "two", "three", "one", "two"],
"C": ["a", "b", "c", "d", "e"],
"D": np.random.randn(5),
"E": np.random.randn(5),
("tuple", "as", "label"): np.random.randn(5),
}
)
return df
| bsd-3-clause |
yutiansut/QUANTAXIS | QUANTAXIS/QASU/save_okex.py | 2 | 21499 | # coding: utf-8
# Author: 阿财(Rgveda@github)([email protected])
# Created date: 2020-02-27
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import time
from dateutil.tz import tzutc
from dateutil.relativedelta import relativedelta
import pandas as pd
from QUANTAXIS.QAUtil import (
DATABASE,
QASETTING,
QA_util_log_info,
QA_util_log_expection,
QA_util_to_json_from_pandas
)
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
from QUANTAXIS.QAFetch.QAOKEx import (
QA_fetch_okex_symbols,
QA_fetch_okex_kline,
QA_fetch_okex_kline_min,
OKEx2QA_FREQUENCY_DICT
)
from QUANTAXIS.QAUtil.QAcrypto import (
QA_util_save_raw_symbols,
QA_util_find_missing_kline
)
from QUANTAXIS.QAFetch.QAQuery import (QA_fetch_cryptocurrency_list)
import pymongo
# OKEx only provides the most recent 2000 bars of historical data
OKEx_MIN_DATE = datetime.datetime(2017, 10, 1, tzinfo=tzutc())
OKEx_EXCHANGE = 'OKEX'
OKEx_SYMBOL = 'OKEX.{}'
def QA_SU_save_okex(frequency):
"""
Save OKEx kline "smart"
"""
if (frequency not in ["1d", '86400', "1day", "day"]):
return QA_SU_save_okex_min(frequency)
else:
return QA_SU_save_okex_day(frequency)
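# Hypothetical call sketch: the frequency string selects the saver, e.g.
#   QA_SU_save_okex('86400')  # daily bars  -> QA_SU_save_okex_day
#   QA_SU_save_okex('60')     # 1-min bars  -> QA_SU_save_okex_min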
def QA_SU_save_okex_day(
frequency='86400',
ui_log=None,
ui_progress=None):
"""
    Save OKEx daily klines; fields are normalized and the data is saved as crypto_asset_day
"""
symbol_template = OKEx_SYMBOL
symbol_list = QA_fetch_cryptocurrency_list(OKEx_EXCHANGE)
col = DATABASE.cryptocurrency_day
col.create_index(
[
("symbol",
pymongo.ASCENDING),
("date_stamp",
pymongo.ASCENDING)
],
unique=True
)
end = datetime.datetime.now(tzutc())
QA_util_log_info(
'Starting DOWNLOAD PROGRESS of day Klines from {:s}... '.format(OKEx_EXCHANGE),
ui_log=ui_log,
ui_progress=ui_progress
)
for index in range(len(symbol_list)):
symbol_info = symbol_list.iloc[index]
QA_util_log_info(
'The "{}" #{} of total in {}'.format(
symbol_template.format(symbol_info['symbol']),
index,
len(symbol_list)
),
ui_log=ui_log,
ui_progress=ui_progress
)
QA_util_log_info(
'DOWNLOAD PROGRESS {} '
.format(str(float(index / len(symbol_list) * 100))[0:4] + '%'),
ui_log=ui_log,
ui_progress=ui_progress
)
query_id = {
"symbol": symbol_template.format(symbol_info['symbol']),
}
ref = col.find(query_id).sort('date_stamp', -1)
if (col.count_documents(query_id) > 0):
start_stamp = ref.next()['date_stamp']
start_time = datetime.datetime.fromtimestamp(
start_stamp + 1,
tz=tzutc()
)
QA_util_log_info(
'UPDATE_SYMBOL "{}" Trying updating "{}" from {} to {}'.format(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
QA_util_timestamp_to_str(start_time),
QA_util_timestamp_to_str(end)
),
ui_log=ui_log,
ui_progress=ui_progress
)
            # Kline gaps were found; use spot-fetch mode to request kline data exactly for the missing time ranges
missing_data_list = QA_util_find_missing_kline(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
)[::-1]
else:
start_time = OKEx_MIN_DATE
QA_util_log_info(
'NEW_SYMBOL "{}" Trying downloading "{}" from {} to {}'.format(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
QA_util_timestamp_to_str(start_time),
QA_util_timestamp_to_str(end)
),
ui_log=ui_log,
ui_progress=ui_progress
)
miss_kline = pd.DataFrame(
[
[
int(QA_util_datetime_to_Unix_timestamp(start_time)),
int(QA_util_datetime_to_Unix_timestamp(end)),
'{} to {}'.format(start_time,
end)
]
],
columns=['expected',
'between',
'missing']
)
missing_data_list = miss_kline.values
if len(missing_data_list) > 0:
            # Determine the start/end times of the broken kline ranges; fetch the missing intraday data to fill them
expected = 0
between = 1
missing = 2
reqParams = {}
for i in range(len(missing_data_list)):
reqParams['from'] = int(missing_data_list[i][expected])
reqParams['to'] = int(missing_data_list[i][between])
if (reqParams['from'] >
(QA_util_datetime_to_Unix_timestamp() + 120)):
                    # A "future" timestamp appeared, usually caused by a wrong default timezone setting
QA_util_log_info(
'A unexpected \'Future\' timestamp got, Please check self.missing_data_list_func param \'tzlocalize\' set. More info: {:s}@{:s} at {:s} but current time is {}'
.format(
symbol_template.format(symbol_info['symbol']),
frequency,
QA_util_print_timestamp(reqParams['from']),
QA_util_print_timestamp(
QA_util_datetime_to_Unix_timestamp()
)
)
)
                    # Skip to the next time range
continue
QA_util_log_info(
'Fetch "{:s}" slices "{:s}" kline:{:s} to {:s}'.format(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
QA_util_timestamp_to_str(
missing_data_list[i][expected]
)[2:16],
QA_util_timestamp_to_str(
missing_data_list[i][between]
)[2:16]
)
)
data = QA_fetch_okex_kline(
symbol_info['symbol'],
time.mktime(start_time.utctimetuple()),
time.mktime(end.utctimetuple()),
frequency,
callback_func=QA_SU_save_data_okex_callback
)
if data is None:
QA_util_log_info(
'SYMBOL "{}" from {} to {} has no data'.format(
symbol_template.format(symbol_info['symbol']),
QA_util_timestamp_to_str(start_time),
QA_util_timestamp_to_str(end)
),
ui_log=ui_log,
ui_progress=ui_progress
)
continue
QA_util_log_info(
'DOWNLOAD PROGRESS of day Klines from {:s} accomplished.'.format(OKEx_EXCHANGE),
ui_log=ui_log,
ui_progress=ui_progress
)
def QA_SU_save_okex_min(
frequency='60',
ui_log=None,
ui_progress=None):
"""
    Save OKEx minute klines; fields are normalized and the data is saved as crypto_asset_min
"""
symbol_template = OKEx_SYMBOL
symbol_list = QA_fetch_cryptocurrency_list(OKEx_EXCHANGE)
col = DATABASE.cryptocurrency_min
col.create_index(
[
("symbol",
pymongo.ASCENDING),
('time_stamp',
pymongo.ASCENDING),
('date_stamp',
pymongo.ASCENDING)
]
)
col.create_index(
[
("symbol",
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
('time_stamp',
pymongo.ASCENDING)
],
unique=True
)
end = datetime.datetime.now(tzutc())
QA_util_log_info(
'Starting DOWNLOAD PROGRESS of min Klines from {:s}... '.format(OKEx_EXCHANGE),
ui_log=ui_log,
ui_progress=ui_progress
)
for index in range(len(symbol_list)):
symbol_info = symbol_list.iloc[index]
        # Only listed trading pairs are processed
QA_util_log_info(
'The "{}" #{} of total in {}'.format(
symbol_template.format(symbol_info['symbol']),
index,
len(symbol_list)
),
ui_log=ui_log,
ui_progress=ui_progress
)
QA_util_log_info(
'DOWNLOAD PROGRESS {} '
.format(str(float(index / len(symbol_list) * 100))[0:4] + '%'),
ui_log=ui_log,
ui_progress=ui_progress
)
query_id = {
"symbol": symbol_template.format(symbol_info['symbol']),
'type': OKEx2QA_FREQUENCY_DICT[frequency]
}
ref = col.find(query_id).sort('time_stamp', -1)
if (col.count_documents(query_id) > 0):
start_stamp = ref.next()['time_stamp']
start_time = datetime.datetime.fromtimestamp(
start_stamp + 1,
tz=tzutc()
)
QA_util_log_info(
'UPDATE_SYMBOL "{}" Trying updating "{}" from {} to {}'.format(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
QA_util_timestamp_to_str(start_time),
QA_util_timestamp_to_str(end)
),
ui_log=ui_log,
ui_progress=ui_progress
)
            # Kline gaps were found; use spot-fetch mode to request kline data exactly for the missing time ranges
missing_data_list = QA_util_find_missing_kline(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
)[::-1]
else:
start_time = OKEx_MIN_DATE
QA_util_log_info(
'NEW_SYMBOL "{}" Trying downloading "{}" from {} to {}'.format(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
QA_util_timestamp_to_str(start_time),
QA_util_timestamp_to_str(end)
),
ui_log=ui_log,
ui_progress=ui_progress
)
miss_kline = pd.DataFrame(
[
[
int(QA_util_datetime_to_Unix_timestamp(start_time)),
int(QA_util_datetime_to_Unix_timestamp(end)),
'{} to {}'.format(start_time,
end)
]
],
columns=['expected',
'between',
'missing']
)
missing_data_list = miss_kline.values
if len(missing_data_list) > 0:
            # Determine the start/end times of the broken kline ranges; fetch the missing intraday data to fill them
expected = 0
between = 1
missing = 2
reqParams = {}
for i in range(len(missing_data_list)):
reqParams['from'] = int(missing_data_list[i][expected])
reqParams['to'] = int(missing_data_list[i][between])
if (reqParams['from'] >
(QA_util_datetime_to_Unix_timestamp() + 120)):
                    # A "future" timestamp appeared, usually caused by a wrong default timezone setting
QA_util_log_info(
'A unexpected \'Future\' timestamp got, Please check self.missing_data_list_func param \'tzlocalize\' set. More info: {:s}@{:s} at {:s} but current time is {}'
.format(
symbol_template.format(symbol_info['symbol']),
frequency,
QA_util_print_timestamp(reqParams['from']),
QA_util_print_timestamp(
QA_util_datetime_to_Unix_timestamp()
)
)
)
                    # Skip to the next time range
continue
QA_util_log_info(
'Fetch "{:s}" slices "{:s}" kline:{:s} to {:s}'.format(
symbol_template.format(symbol_info['symbol']),
OKEx2QA_FREQUENCY_DICT[frequency],
QA_util_timestamp_to_str(
missing_data_list[i][expected]
)[2:16],
QA_util_timestamp_to_str(
missing_data_list[i][between]
)[2:16]
)
)
data = QA_fetch_okex_kline_min(
symbol_info['symbol'],
start_time=reqParams['from'],
end_time=reqParams['to'],
frequency=frequency,
callback_func=QA_SU_save_data_okex_callback
)
if data is None:
QA_util_log_info(
'SYMBOL "{}" from {} to {} has no MORE data'.format(
symbol_template.format(symbol_info['symbol']),
QA_util_timestamp_to_str(start_time),
QA_util_timestamp_to_str(end)
)
)
continue
QA_util_log_info(
'DOWNLOAD PROGRESS of min Klines from {:s} accomplished.'.format(OKEx_EXCHANGE),
ui_log=ui_log,
ui_progress=ui_progress
)
def QA_SU_save_okex_1min():
QA_SU_save_okex('60')
def QA_SU_save_okex_1day():
QA_SU_save_okex("86400")
def QA_SU_save_okex_1hour():
QA_SU_save_okex("3600")
def QA_SU_save_okex_symbol(
market=OKEx_EXCHANGE,
client=DATABASE,
):
"""
    Save OKEx trading pair (symbol) information
"""
market = market.upper()
QA_util_log_info('Downloading {:s} symbol list...'.format(market))
    # Keep the raw OKEx API symbol data for reference; it is useful for automated trading
raw_symbol_lists = QA_util_save_raw_symbols(
QA_fetch_okex_symbols,
market
)
if (len(raw_symbol_lists) > 0):
        # Save to the QUANTAXIS.crypto_asset_list digital asset list, aggregating data for unified cross-market queries
symbol_lists = pd.DataFrame(raw_symbol_lists)
        # market and symbol are MongoDB index fields; check that they exist before saving
symbol_lists['market'] = market
symbol_lists['category'] = 1
symbol_lists.rename(
{
'instrument_id': 'symbol',
'tick_size': 'price_precision',
},
axis=1,
inplace=True
)
symbol_lists['state'] = 'online'
symbol_lists['name'] = symbol_lists.apply(
lambda x: '{:s}/{:s}'.
format(x['base_currency'].upper(),
x['quote_currency'].upper()),
axis=1
)
symbol_lists['desc'] = symbol_lists['name']
        # Remove non-common fields; only the broker cares about them. When implementing the
        # corresponding exchange broker interface, read them from the exchange raw_symbol_lists data.
symbol_lists.drop(
[
'min_size',
'size_increment',
],
axis=1,
inplace=True
)
if ('_id' in symbol_lists.columns.values):
            # Sometimes present and must be dropped separately
symbol_lists.drop(
[
'_id',
],
axis=1,
inplace=True
)
symbol_lists['created_at'] = int(
time.mktime(datetime.datetime.now().utctimetuple())
)
symbol_lists['updated_at'] = int(
time.mktime(datetime.datetime.now().utctimetuple())
)
coll_cryptocurrency_list = client.cryptocurrency_list
coll_cryptocurrency_list.create_index(
[('market',
pymongo.ASCENDING),
('symbol',
pymongo.ASCENDING)],
unique=True
)
try:
query_id = {'market': market}
if (coll_cryptocurrency_list.count_documents(query_id) > 0):
                # Remove duplicate data
query_id = {
'market': market,
'symbol': {
'$in': symbol_lists['symbol'].tolist()
}
}
coll_cryptocurrency_list.delete_many(query_id)
coll_cryptocurrency_list.insert_many(
QA_util_to_json_from_pandas(symbol_lists)
)
return symbol_lists
except:
QA_util_log_expection(
'QA_SU_save_okex_symbol(): Insert_many(symbol) to "cryptocurrency_list" got Exception with {} klines'
.format(len(symbol_lists))
)
pass
return []
def QA_SU_save_data_okex_callback(data, freq):
"""
    MongoDB storage callback for asynchronously fetched data; OKEx also returns klines in reverse chronological order
"""
symbol_template = OKEx_SYMBOL
QA_util_log_info(
'SYMBOL "{}" Recived "{}" from {} to {} in total {} klines'.format(
data.iloc[0].symbol,
freq,
time.strftime(
'%Y-%m-%d %H:%M:%S',
time.localtime(data.iloc[-1].time_stamp)
)[2:16],
time.strftime(
'%Y-%m-%d %H:%M:%S',
time.localtime(data.iloc[0].time_stamp)
)[2:16],
len(data)
)
)
if (freq not in ['1day', '86400', 'day', '1d']):
col = DATABASE.cryptocurrency_min
col.create_index(
[
("symbol",
pymongo.ASCENDING),
('time_stamp',
pymongo.ASCENDING),
('date_stamp',
pymongo.ASCENDING)
]
)
col.create_index(
[
("symbol",
pymongo.ASCENDING),
("type",
pymongo.ASCENDING),
('time_stamp',
pymongo.ASCENDING)
],
unique=True
)
        # Check whether these are new ticks
query_id = {
"symbol": data.iloc[0].symbol,
'type': data.iloc[0].type,
'time_stamp': {
'$in': data['time_stamp'].tolist()
}
}
refcount = col.count_documents(query_id)
else:
col = DATABASE.cryptocurrency_day
col.create_index(
[
("symbol",
pymongo.ASCENDING),
("date_stamp",
pymongo.ASCENDING)
],
unique=True
)
        # Check whether these are new ticks
query_id = {
"symbol": data.iloc[0].symbol,
'date_stamp': {
'$in': data['date_stamp'].tolist()
}
}
refcount = col.count_documents(query_id)
if refcount > 0:
if (len(data) > 1):
            # Remove duplicate data first
col.delete_many(query_id)
data = QA_util_to_json_from_pandas(data)
col.insert_many(data)
else:
            # Quotes are being received continuously; update the existing record
data.drop('created_at', axis=1, inplace=True)
data = QA_util_to_json_from_pandas(data)
col.replace_one(query_id, data[0])
else:
        # New ticks; insert the records
data = QA_util_to_json_from_pandas(data)
col.insert_many(data)
if __name__ == '__main__':
QA_SU_save_okex_min('900')
QA_SU_save_okex_symbol()
#QA_SU_save_okex_1day()
#QA_SU_save_okex_1hour()
QA_SU_save_okex_1min()
| mit |
anorfleet/turntable | test/lib/python2.7/site-packages/scipy/stats/kde.py | 9 | 18242 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy.lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
kde.integrate_gaussian(mean, cov) : float
Multiply pdf with a specified Gaussian and integrate over the whole
domain.
kde.integrate_box_1d(low, high) : float
Integrate pdf (1D only) between two bounds.
kde.integrate_box(low_bounds, high_bounds) : float
Integrate pdf over a rectangular space between low_bounds and
high_bounds.
kde.integrate_kde(other_kde) : float
Integrate two kernel density estimates multiplied together.
kde.pdf(points) : ndarray
Alias for ``kde.evaluate(points)``.
kde.logpdf(points) : ndarray
Equivalent to ``np.log(kde.evaluate(points))``.
kde.resample(size=None) : ndarray
Randomly sample a dataset from the estimated pdf.
kde.set_bandwidth(bw_method='scott') : None
Computes the bandwidth, i.e. the coefficient that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
.. versionadded:: 0.11.0
kde.covariance_factor : float
Computes the coefficient (`kde.factor`) that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
The default is `scotts_factor`. A subclass can overwrite this method
to provide a different method, or set it through a call to
`kde.set_bandwidth`.
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
>>> "Measurement model, return two coupled measurements."
>>> m1 = np.random.normal(size=n)
>>> m2 = np.random.normal(scale=0.5, size=n)
>>> return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=np.float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
| mit |
Myasuka/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
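# --- Hedged usage sketch (illustration only, not part of scikit-learn) ---
# Shows how the `include_self` switch documented above changes the diagonal of
# the connectivity matrix: with include_self=True every sample counts as its
# own first neighbor, with include_self=False only *other* points are linked.
if __name__ == "__main__":
    X_demo = [[0.], [3.], [1.]]
    A_self = kneighbors_graph(X_demo, n_neighbors=1, include_self=True)
    A_noself = kneighbors_graph(X_demo, n_neighbors=1, include_self=False)
    print(A_self.toarray())    # identity matrix: each point is its own neighbor
    print(A_noself.toarray())  # ones off the diagonal: nearest *other* point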
| bsd-3-clause |
marcindulak/gpaw-on-aws | benchmark/plot.py | 1 | 4378 | import csv
def CommentStripper(iterator):
for line in iterator:
if line.startswith('# p'):
continue
if line.startswith('# GPAW'):
continue
if line.startswith(' 32'):
continue
if line.startswith(' 64'):
continue
yield line
# extract the run information: description line and results on 16 cores for all runs from analyse.txt
rows = []
for row in csv.reader(CommentStripper(open('analyse.txt', 'rb'))):
if len(row) > 0:
rows.append(row)
assert len(rows) % 2 == 0
# ID, 16 core price/hour in USD, total time in sec, price of the 16 cores run in USD cents, cpu type
runs = {}
for index in range(0, len(rows) - 1, 2):
price = eval(rows[index][0].split()[3]) # price per hour
cpu = rows[index][2].strip()
time = eval(rows[index + 1][0].split()[-3].strip())
total_price = time / 3600 * price * 100
runs[rows[index][0].split()[2]] = (price, time, total_price, cpu)
ids = sorted(runs.keys())
# write plot data in csv format
with open('plot.csv', 'w') as csvfile:
fieldnames = ['ID',
'16 core price/hour in USD',
'total time in sec',
'price of the 16 cores run in USD cents',
'cpu type']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for id in ids:
row = {fieldnames[0]: id}
for n, field in enumerate(fieldnames[1:]):
row.update({field: runs[id][n]})
writer.writerow(row)
labels = [id.replace('xeon16.openblas.SL', 'Niflheim') for id in ids]
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
colors = ('k', 'b', 'g', 'y', 'c', 'w', 'm', 'r')
scale = [l + 0.5 for l in range(len(labels))]
zero = [0.0 for v in scale]
v0010 = [10.0 for v in scale]
v0020 = [20.0 for v in scale]
v0030 = [30.0 for v in scale]
v0050 = [50.0 for v in scale]
v0100 = [100.0 for v in scale]
v0500 = [500.0 for v in scale]
v0800 = [800.0 for v in scale]
v0900 = [900.0 for v in scale]
v1000 = [1000.0 for v in scale]
if 0:
pylab.plot(scale, zero, 'k-', label='_nolegend_')
pylab.plot(v0010, zero, 'k-', label='_nolegend_')
pylab.plot(v0020, zero, 'k-', label='_nolegend_')
pylab.plot(v0020, zero, 'k-', label='_nolegend_')
pylab.plot(v0050, zero, 'k-', label='_nolegend_')
pylab.plot(v0500, zero, 'k-', label='_nolegend_')
pylab.plot(v0800, zero, 'k-', label='_nolegend_')
pylab.plot(v0900, zero, 'k-', label='_nolegend_')
pylab.plot(v1000, zero, 'k-', label='_nolegend_')
pylab.gca().set_ylim(-0.1, 1100.1)
pylab.gca().set_xlim(-0.1, max(scale) + 0.5 + 0.1)
if 0: # print vertical lines at xticks
# http://matplotlib.org/examples/pylab_examples/axhspan_demo.html
for s in scale:
l = pylab.axvline(x=s, linewidth=0.5, color=(0,0,0,0), alpha=0.5)
ay1 = pylab.gca()
ay1.xaxis.set_ticks([n for n in scale])
ay1.xaxis.set_ticklabels(labels)
ay1.yaxis.set_ticks([10., 50., 100., 500., 800., 900., 1000.,])
ay1.yaxis.set_ticklabels(['10', '50', '100', '500', '800', '900', '1000'])
for label in ay1.get_xticklabels() + ay1.get_yticklabels():
label.set_fontsize(12)
# rotate labels http://old.nabble.com/Rotate-x-axes-%28xticks%29-text.-td3141258.html
for n, label in enumerate(ay1.get_xticklabels()):
label.set_rotation(70)
# label.set_position((0.0, 1.0 * (n % 2))) # once up / once down
label.set_position((0.0, 0.0)) # down
# create bins
time = [runs[id][1] for id in ids]
price = [runs[id][2] for id in ids]
width = 0.6
plotscale = [s - width/2 for s in scale]
plots = []
plots.append(pylab.bar(plotscale, price, width, color=colors[0], alpha=1.0))
plots.append(pylab.bar(plotscale, time, width, color=colors[1], alpha=0.3))
pylab.ylabel('Performance')
t = pylab.title('GPAW benchmark on 16 CPU cores')
# http://old.nabble.com/More-space-between-title-and-secondary-x-axis-td31722298.html
t.set_y(1.05)
prop = matplotlib.font_manager.FontProperties(size=12)
leg = pylab.legend(plots, ['price of the run in USD cents',
'run time in seconds'],
fancybox=True, prop=prop)
leg.get_frame().set_alpha(0.5)
# http://www.mail-archive.com/[email protected]/msg03952.html
leg._loc=(0.40, 0.85)
pylab.savefig('plot.png', bbox_inches='tight', dpi=600)
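# --- Worked example of the cost formula used above (hypothetical numbers) ---
# total_price = time / 3600 * price * 100 converts a run time in seconds and an
# hourly instance price in USD into the cost of that run in USD cents; e.g. a
# 4378 s run at 0.50 USD/hour costs roughly 60.8 cents.
def _run_cost_cents(seconds, usd_per_hour):
    """Cost of a single 16-core run in USD cents, mirroring the formula above."""
    return seconds / 3600. * usd_per_hour * 100.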
| bsd-2-clause |
zqhuang/COOP | mapio/pyscripts/sharex.py | 2 | 2072 | import numpy as np
from matplotlib import use
use('pdf')
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LogNorm
import pylab as m
cdict = {
'red' : ((0., 1., 1.), (0.02, 0.56, 0.56), (0.25, 0., 0.), (0.45, 0.1, 0.1), (0.5, 0., 0.) , (0.6, 0.7, 0.7), (0.7, 1., 1.), (0.8, 1., 1.), (1., 1., 1.)),
'green': ((0., 1., 1.), (0.02, 0.21, 0.21), (0.25, 0., 0.), (0.45, 0.9, 0.9) ,(0.5, 1., 1.), (0.7, 1., 1.), (0.8, 0.5, 0.5), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (0.02, 0.94, 0.94), (0.25, 1., 1.), (0.45, 0.1, 0.1), (0.5, 0., 0.), (0.7, 0., 0.), (0.8, 0.2, 0.2), (1., 0., 0.))
}
#generate the colormap with 1024 interpolated values
my_cmap = m.matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 1024)
mpl.cm.register_cmap(cmap = my_cmap)
plt.set_cmap(my_cmap) #'gist_rainbow_r')
fig, axes = plt.subplots(nrows=4, figsize=(10, 8.), sharex = True)
chimin = 0.10000E-01
chimax = 1.2000
lmin = 2.
lmax = 3000.
axes[0].set_yscale('log')
axes[0].set_ylabel('$\ell$')
grid = np.loadtxt("ztinfo.txt").transpose()
im = axes[0].imshow(grid[::-1], extent=[chimin,chimax,lmin, lmax], vmin = 0.1, vmax = 10., norm=LogNorm())
axes[0].set_title('Temperature $S/N$')
axes[1].set_yscale('log')
axes[1].set_ylabel('$\ell$')
grid = np.loadtxt("zeinfo.txt").transpose()
im = axes[1].imshow(grid[::-1], extent=[chimin,chimax,lmin, lmax], vmin = 0.1, vmax = 10., norm=LogNorm())
axes[1].set_title('E polarization $S/N$')
axes[2].set_yscale('log')
axes[2].set_ylabel('$\ell$')
grid = np.loadtxt("zetinfo.txt").transpose()
im = axes[2].imshow(grid[::-1], extent=[chimin,chimax,lmin, lmax], vmin = 0.1, vmax = 10., norm=LogNorm())
axes[2].set_title('T + E $S/N$')
fig.colorbar(im, ax = axes.ravel().tolist())
chivis = np.loadtxt("vis.txt")
chi = [ s[0] for s in chivis ]
vis = [ s[1] for s in chivis ]
axes[3].plot(chi,vis)
axes[3].set_yscale('log')
axes[3].set_ylabel('$\dot\kappa e^{-\kappa} / H_0$')
axes[3].set_xlabel('$\chi/\chi_{rec}$')
axes[3].set_title('differential visibility')
plt.savefig('zetaSN.pdf', format='pdf')
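# --- Hedged note on the colormap dictionary above (illustration only) ---
# Each entry of `cdict` is a list of (x, y0, y1) anchors: x is the position
# along the colormap in [0, 1], y0 is the channel value used just below x and
# y1 the value used just above it.  Sampling the registered colormap shows the
# resulting RGBA values:
if __name__ == "__main__":
    for frac in (0.0, 0.25, 0.5, 1.0):
        print(my_cmap(frac))   # RGBA tuple at this fraction of the scale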
| gpl-3.0 |
PhE/dask | dask/tests/test_core.py | 3 | 3882 | from collections import namedtuple
from operator import add
from dask.utils import raises
from dask.core import (istask, get, get_dependencies, flatten, subs,
preorder_traversal, quote, list2)
def contains(a, b):
"""
>>> contains({'x': 1, 'y': 2}, {'x': 1})
True
>>> contains({'x': 1, 'y': 2}, {'z': 3})
False
"""
return all(a.get(k) == v for k, v in b.items())
def inc(x):
return x + 1
def add(x, y):
return x + y
def test_istask():
assert istask((inc, 1))
assert not istask(1)
assert not istask((1, 2))
f = namedtuple('f', ['x', 'y'])
assert not istask(f(sum, 2))
d = {':x': 1,
':y': (inc, ':x'),
':z': (add, ':x', ':y')}
def test_preorder_traversal():
t = (add, 1, 2)
assert list(preorder_traversal(t)) == [add, 1, 2]
t = (add, (add, 1, 2), (add, 3, 4))
assert list(preorder_traversal(t)) == [add, add, 1, 2, add, 3, 4]
t = (add, (sum, [1, 2]), 3)
assert list(preorder_traversal(t)) == [add, sum, list, 1, 2, 3]
def test_get():
assert get(d, ':x') == 1
assert get(d, ':y') == 2
assert get(d, ':z') == 3
assert get(d, 'pass-through') == 'pass-through'
def test_memoized_get():
try:
import toolz
except ImportError:
return
cache = dict()
getm = toolz.memoize(get, cache=cache, key=lambda args, kwargs: args[1:])
result = getm(d, ':z', get=getm)
assert result == 3
assert contains(cache, {(':x',): 1,
(':y',): 2,
(':z',): 3})
def test_data_not_in_dict_is_ok():
d = {'x': 1, 'y': (add, 'x', 10)}
assert get(d, 'y') == 11
def test_get_with_list():
d = {'x': 1, 'y': 2, 'z': (sum, ['x', 'y'])}
assert get(d, ['x', 'y']) == [1, 2]
assert get(d, 'z') == 3
def test_get_with_nested_list():
d = {'x': 1, 'y': 2, 'z': (sum, ['x', 'y'])}
assert get(d, [['x'], 'y']) == [[1], 2]
assert get(d, 'z') == 3
def test_get_works_with_unhashables_in_values():
f = lambda x, y: x + len(y)
d = {'x': 1, 'y': (f, 'x', set([1]))}
assert get(d, 'y') == 2
def test_get_laziness():
def isconcrete(arg):
return isinstance(arg, list)
d = {'x': 1, 'y': 2, 'z': (isconcrete, ['x', 'y'])}
assert get(d, ['x', 'y']) == [1, 2]
assert get(d, 'z') == False
def test_get_dependencies_nested():
dsk = {'x': 1, 'y': 2,
'z': (add, (inc, [['x']]), 'y')}
assert get_dependencies(dsk, 'z') == set(['x', 'y'])
def test_get_dependencies_empty():
dsk = {'x': (inc,)}
assert get_dependencies(dsk, 'x') == set()
def test_nested_tasks():
d = {'x': 1,
'y': (inc, 'x'),
'z': (add, (inc, 'x'), 'y')}
assert get(d, 'z') == 4
def test_get_stack_limit():
d = dict(('x%s' % (i+1), (inc, 'x%s' % i)) for i in range(10000))
d['x0'] = 0
assert get(d, 'x10000') == 10000
# introduce cycle
d['x5000'] = (inc, 'x5001')
assert raises(RuntimeError, lambda: get(d, 'x10000'))
assert get(d, 'x4999') == 4999
def test_flatten():
assert list(flatten(())) == []
assert list(flatten('foo')) == ['foo']
def test_subs():
assert subs((sum, [1, 'x']), 'x', 2) == (sum, [1, 2])
assert subs((sum, [1, ['x']]), 'x', 2) == (sum, [1, [2]])
def test_subs_with_unfriendly_eq():
try:
import numpy as np
except:
return
else:
task = (np.sum, np.array([1, 2]))
assert (subs(task, (4, 5), 1) == task) is True
def test_subs_with_surprisingly_friendly_eq():
try:
import pandas as pd
except:
return
else:
df = pd.DataFrame()
assert subs(df, 'x', 1) is df
def test_quote():
literals = [[1, 2, 3], (add, 1, 2),
[1, [2, 3]], (add, 1, (add, 2, 3))]
for l in literals:
assert get({'x': quote(l)}, 'x') == l
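# --- Hedged usage sketch (illustration only, not one of the original tests) ---
# A dask graph is a plain dict mapping keys to values or to task tuples of the
# form (callable, arg1, arg2, ...); `get` resolves a key by walking the graph,
# which is what the tests above exercise.
def _example_graph():
    dsk = {'a': 1,
           'b': (inc, 'a'),        # b = a + 1 = 2
           'c': (add, 'a', 'b')}   # c = a + b = 3
    assert get(dsk, 'c') == 3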
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/decomposition/plot_pca_iris.py | 1 | 1481 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn import decomposition
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
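# --- Hedged addition (illustration only): `explained_variance_ratio_` is the
# standard scikit-learn attribute reporting the fraction of total variance
# captured by each of the three fitted components.
print("explained variance ratios: %s" % (pca.explained_variance_ratio_,))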
| mit |
rgommers/statsmodels | statsmodels/nonparametric/kde.py | 1 | 18953 | """
Univariate Kernel Density Estimators
References
----------
Racine, Jeff. (2008) "Nonparametric Econometrics: A Primer," Foundation and
Trends in Econometrics: Vol 3: No 1, pp1-88.
http://dx.doi.org/10.1561/0800000009
http://en.wikipedia.org/wiki/Kernel_%28statistics%29
Silverman, B.W. Density Estimation for Statistics and Data Analysis.
"""
from __future__ import absolute_import, print_function
from statsmodels.compat.python import range
# for 2to3 with extensions
import warnings
import numpy as np
from scipy import integrate, stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
from . import bandwidths
from .kdetools import (forrt, revrt, silverman_transform, counts)
from .linbin import fast_linbin
#### Kernels Switch for estimators ####
kernel_switch = dict(gau=kernels.Gaussian, epa=kernels.Epanechnikov,
uni=kernels.Uniform, tri=kernels.Triangular,
biw=kernels.Biweight, triw=kernels.Triweight,
cos=kernels.Cosine, cos2=kernels.Cosine2)
def _checkisfit(self):
try:
self.density
except:
raise ValueError("Call fit to fit the density first")
#### Kernel Density Estimator Class ###
class KDEUnivariate(object):
"""
Univariate Kernel Density Estimator.
Parameters
----------
endog : array-like
The variable for which the density estimate is desired.
Notes
-----
If cdf, sf, cumhazard, or entropy are computed, they are computed based on
the definition of the kernel rather than the FFT approximation, even if
the density is fit with FFT = True.
`KDEUnivariate` is much faster than `KDEMultivariate`, due to its FFT-based
implementation. It should be preferred for univariate, continuous data.
`KDEMultivariate` also supports mixed data.
See Also
--------
KDEMultivariate
kdensity, kdensityfft
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> nobs = 300
>>> np.random.seed(1234) # Seed random generator
>>> dens = sm.nonparametric.KDEUnivariate(np.random.normal(size=nobs))
>>> dens.fit()
>>> plt.plot(dens.cdf)
>>> plt.show()
"""
def __init__(self, endog):
self.endog = np.asarray(endog)
def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None,
gridsize=None, adjust=1, cut=3, clip=(-np.inf, np.inf)):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, it is the bandwidth.
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of X so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
"""
try:
bw = float(bw)
self.bw_method = "user-given"
except:
self.bw_method = bw
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
else:
density, grid, bw = kdensity(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
        # should this be passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = resettable_cache()
@cache_readonly
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
density = self.density
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a,b = -np.inf,np.inf
else:
a,b = kern.domain
func = lambda x,s: kern.density(s,x)
support = self.support
support = np.r_[a,support]
gridsize = len(support)
endog = self.endog
probs = [integrate.quad(func, support[i-1], support[i],
args=endog)[0] for i in range(1,gridsize)]
return np.cumsum(probs)
@cache_readonly
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf)
@cache_readonly
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf
@cache_readonly
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
"""
_checkisfit(self)
def entr(x,s):
pdf = kern.density(s,x)
return pdf*np.log(pdf+1e-12)
pdf = self.density
kern = self.kernel
if kern.domain is not None:
a,b = self.domain
else:
a,b = -np.inf,np.inf
endog = self.endog
#TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a,b, args=(endog,))[0]
@cache_readonly
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0,1,
gridsize))
def evaluate(self, point):
"""
Evaluate density at a single point.
Parameters
----------
point : float
Point at which to evaluate the density.
"""
_checkisfit(self)
return self.kernel.density(self.endog, point)
class KDE(KDEUnivariate):
def __init__(self, endog):
self.endog = np.asarray(endog)
warnings.warn("KDE is deprecated and will be removed in 0.6, "
"use KDEUnivariate instead", FutureWarning)
#### Kernel Density Estimator Functions ####
def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version.
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:,None]
clip_x = np.logical_and(X>clip[0], X<clip[1])
X = X[clip_x]
nobs = float(len(X)) # after trim
    if gridsize is None:
gridsize = max(nobs,50) # don't need to resize if no FFT
# handle weights
if weights is None:
weights = np.ones(nobs)
q = nobs
else:
# ensure weights is a numpy array
weights = np.asarray(weights)
if len(weights) != len(clip_x):
msg = "The length of the weights must be the same as the given X."
raise ValueError(msg)
weights = weights[clip_x.squeeze()]
q = weights.sum()
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
# if bw is None, select optimal bandwidth for kernel
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern)
bw *= adjust
a = np.min(X,axis=0) - cut*bw
b = np.max(X,axis=0) + cut*bw
grid = np.linspace(a, b, gridsize)
k = (X.T - grid[:,None])/bw # uses broadcasting to make a gridsize x nobs
# set kernel bandwidth
kern.seth(bw)
# truncate to domain
if kern.domain is not None: # won't work for piecewise kernels like parzen
z_lo, z_high = kern.domain
domain_mask = (k < z_lo) | (k > z_high)
k = kern(k) # estimate density
k[domain_mask] = 0
else:
k = kern(k) # estimate density
k[k<0] = 0 # get rid of any negative values, do we need this?
dens = np.dot(k,weights)/(q*bw)
if retgrid:
return dens, grid, bw
else:
return dens, bw
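def _kdensity_example():
    """Hedged usage sketch (illustration only, not part of the library).
    Runs the non-FFT estimator above on simulated data and checks that the
    estimated density integrates to roughly one over the returned grid.
    """
    x = np.random.randn(200)
    dens, grid, bw = kdensity(x, kernel="gau", bw="silverman",
                              gridsize=256, retgrid=True)
    area = np.trapz(dens, grid)   # should be close to 1
    return dens, grid, bw, area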
def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
        If gridsize is None, max(len(X), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{X.min() or X.max()}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights aren't implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
---------- ::
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
    AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
    the Fast Fourier Transform`. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9.
"""
X = np.asarray(X)
X = X[np.logical_and(X>clip[0], X<clip[1])] # won't work for two columns.
# will affect underlying data?
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern?
bw *= adjust
nobs = float(len(X)) # after trim
# 1 Make grid and discretize the data
    if gridsize is None:
gridsize = np.max((nobs,512.))
gridsize = 2**np.ceil(np.log2(gridsize)) # round to next power of 2
a = np.min(X)-cut*bw
b = np.max(X)+cut*bw
grid,delta = np.linspace(a,b,gridsize,retstep=True)
RANGE = b-a
#TODO: Fix this?
# This is the Silverman binning function, but I believe it's buggy (SS)
# weighting according to Silverman
# count = counts(X,grid)
# binned = np.zeros_like(grid) #xi_{k} in Silverman
# j = 0
# for k in range(int(gridsize-1)):
# if count[k]>0: # there are points of X in the grid here
# Xingrid = X[j:j+count[k]] # get all these points
# # get weights at grid[k],grid[k+1]
# binned[k] += np.sum(grid[k+1]-Xingrid)
# binned[k+1] += np.sum(Xingrid-grid[k])
# j += count[k]
# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta
#NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
binned = fast_linbin(X,a,b,gridsize)/(delta*nobs)
# step 2 compute FFT of the weights, using Munro (1976) FFT convention
y = forrt(binned)
# step 3 and 4 for optimal bw compute zstar and the density estimate f
# don't have to redo the above if just changing bw, ie., for cross val
#NOTE: silverman_transform is the closed form solution of the FFT of the
#gaussian kernel. Not yet sure how to generalize it.
zstar = silverman_transform(bw, gridsize, RANGE)*y # 3.49 in Silverman
# 3.50 w Gaussian kernel
f = revrt(zstar)
if retgrid:
return f, grid, bw
else:
return f, bw
if __name__ == "__main__":
import numpy as np
np.random.seed(12345)
xi = np.random.randn(100)
f,grid, bw1 = kdensity(xi, kernel="gau", bw=.372735, retgrid=True)
f2, bw2 = kdensityfft(xi, kernel="gau", bw="silverman",retgrid=False)
# do some checking vs. silverman algo.
# you need denes.f, http://lib.stat.cmu.edu/apstat/176
#NOTE: I (SS) made some changes to the Fortran
# and the FFT stuff from Munro http://lib.stat.cmu.edu/apstat/97o
# then compile everything and link to denest with f2py
#Make pyf file as usual, then compile shared object
#f2py denest.f -m denest2 -h denest.pyf
#edit pyf
#-c flag makes it available to other programs, fPIC builds a shared library
#/usr/bin/gfortran -Wall -c -fPIC fft.f
#f2py -c denest.pyf ./fft.o denest.f
try:
from denest2 import denest # @UnresolvedImport
a = -3.4884382032045504
b = 4.3671504686785605
RANGE = b - a
bw = bandwidths.bw_silverman(xi)
ft,smooth,ifault,weights,smooth1 = denest(xi,a,b,bw,np.zeros(512),np.zeros(512),0,
np.zeros(512), np.zeros(512))
# We use a different binning algo, so only accurate up to 3 decimal places
np.testing.assert_almost_equal(f2, smooth, 3)
#NOTE: for debugging
# y2 = forrt(weights)
# RJ = np.arange(512/2+1)
# FAC1 = 2*(np.pi*bw/RANGE)**2
# RJFAC = RJ**2*FAC1
# BC = 1 - RJFAC/(6*(bw/((b-a)/M))**2)
# FAC = np.exp(-RJFAC)/BC
# SMOOTH = np.r_[FAC,FAC[1:-1]] * y2
# dens = revrt(SMOOTH)
except:
# ft = np.loadtxt('./ft_silver.csv')
# smooth = np.loadtxt('./smooth_silver.csv')
print("Didn't get the estimates from the Silverman algorithm")
| bsd-3-clause |
legacysurvey/pipeline | py/legacypipe/survey.py | 1 | 61399 | from __future__ import print_function
import os
import tempfile
import numpy as np
import fitsio
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.file import trymakedirs
from tractor.ellipses import EllipseESoft, EllipseE
from tractor.galaxy import ExpGalaxy
from tractor import PointSource, ParamList, ConstantFitsWcs
from legacypipe.utils import EllipseWithPriors, galaxy_min_re
import logging
logger = logging.getLogger('legacypipe.survey')
def info(*args):
from legacypipe.utils import log_info
log_info(logger, args)
def debug(*args):
from legacypipe.utils import log_debug
log_debug(logger, args)
# search order: $TMPDIR, $TEMP, $TMP, then /tmp, /var/tmp, /usr/tmp
tempdir = tempfile.gettempdir()
# The apertures we use in aperture photometry, in ARCSEC radius
apertures_arcsec = np.array([0.5, 0.75, 1., 1.5, 2., 3.5, 5., 7.])
# Ugly hack: for sphinx documentation, the astrometry and tractor (and
# other) packages are replaced by mock objects. But you can't
# subclass a mock object correctly, so we have to un-mock
# EllipseWithPriors here.
if 'Mock' in str(type(EllipseWithPriors)):
class duck(object):
pass
EllipseWithPriors = duck
def year_to_mjd(year):
# year_to_mjd(2015.5) -> 57205.875
from tractor.tractortime import TAITime
return (year - 2000.) * TAITime.daysperyear + TAITime.mjd2k
def mjd_to_year(mjd):
# mjd_to_year(57205.875) -> 2015.5
from tractor.tractortime import TAITime
return (mjd - TAITime.mjd2k) / TAITime.daysperyear + 2000.
def tai_to_mjd(tai):
return tai / (24. * 3600.)
def radec_at_mjd(ra, dec, ref_year, pmra, pmdec, parallax, mjd):
'''
Units:
- matches Gaia DR1/DR2
- pmra,pmdec are in mas/yr. pmra is in angular speed (ie, has a cos(dec) factor)
- parallax is in mas.
    NOTE: does not broadcast completely correctly -- passing all parameters
    as vectors, or the motion parameters as vectors with a scalar mjd,
    works fine. Other combinations are not guaranteed.
Returns RA,Dec
'''
from tractor.tractortime import TAITime
from astrometry.util.starutil_numpy import radectoxyz, arcsecperrad, axistilt, xyztoradec
dt = mjd_to_year(mjd) - ref_year
cosdec = np.cos(np.deg2rad(dec))
dec = dec + dt * pmdec / (3600. * 1000.)
ra = ra + (dt * pmra / (3600. * 1000.)) / cosdec
parallax = np.atleast_1d(parallax)
I = np.flatnonzero(parallax)
if len(I):
scalar = np.isscalar(ra) and np.isscalar(dec)
ra = np.atleast_1d(ra)
dec = np.atleast_1d(dec)
suntheta = 2.*np.pi * np.fmod((mjd - TAITime.equinox) / TAITime.daysperyear, 1.0)
# Finite differences on the unit sphere -- xyztoradec handles
# points that are not exactly on the surface of the sphere.
axis = np.deg2rad(axistilt)
scale = parallax[I] / 1000. / arcsecperrad
xyz = radectoxyz(ra[I], dec[I])
xyz[:,0] += scale * np.cos(suntheta)
xyz[:,1] += scale * np.sin(suntheta) * np.cos(axis)
xyz[:,2] += scale * np.sin(suntheta) * np.sin(axis)
r,d = xyztoradec(xyz)
ra [I] = r
dec[I] = d
# radectoxyz / xyztoradec do weird broadcasting
if scalar:
ra = ra[0]
dec = dec[0]
return ra,dec
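def _radec_at_mjd_example():
    """Hedged usage sketch (illustration only, values are hypothetical).
    Propagates a source with 100 mas/yr proper motion in each coordinate and a
    10 mas parallax from reference epoch 2015.5 to MJD 58000 using
    radec_at_mjd above.
    """
    ra, dec = radec_at_mjd(150.0, 30.0, 2015.5,
                           pmra=100., pmdec=100., parallax=10.,
                           mjd=58000.)
    return ra, dec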
# Gaia measures positions better than we will, we assume, so the
# GaiaPosition class pretends that it does not have any parameters
# that can be optimized; therefore they stay fixed.
class GaiaPosition(ParamList):
def __init__(self, ra, dec, ref_epoch, pmra, pmdec, parallax):
'''
Units:
- matches Gaia DR1
- pmra,pmdec are in mas/yr. pmra is in angular speed (ie, has a cos(dec) factor)
- parallax is in mas.
- ref_epoch: year (eg 2015.5)
'''
self.ra = ra
self.dec = dec
self.ref_epoch = float(ref_epoch)
self.pmra = pmra
self.pmdec = pmdec
self.parallax = parallax
super(GaiaPosition, self).__init__()
self.cached_positions = {}
def copy(self):
return GaiaPosition(self.ra, self.dec, self.ref_epoch, self.pmra, self.pmdec,
self.parallax)
def getPositionAtTime(self, mjd):
from tractor import RaDecPos
try:
return self.cached_positions[mjd]
except KeyError:
# not cached
pass
if self.pmra == 0. and self.pmdec == 0. and self.parallax == 0.:
pos = RaDecPos(self.ra, self.dec)
self.cached_positions[mjd] = pos
return pos
ra,dec = radec_at_mjd(self.ra, self.dec, self.ref_epoch,
self.pmra, self.pmdec, self.parallax, mjd)
pos = RaDecPos(ra, dec)
self.cached_positions[mjd] = pos
return pos
@staticmethod
def getName():
return 'GaiaPosition'
def __str__(self):
return ('%s: RA, Dec = (%.5f, %.5f), pm (%.1f, %.1f), parallax %.3f' %
(self.getName(), self.ra, self.dec, self.pmra, self.pmdec, self.parallax))
class GaiaSource(PointSource):
@staticmethod
def getName():
return 'GaiaSource'
def getSourceType(self):
return 'GaiaSource'
@classmethod
def from_catalog(cls, g, bands):
from tractor import NanoMaggies
# Gaia has NaN entries when no proper motion or parallax is measured.
# Convert to zeros.
def nantozero(x):
if not np.isfinite(x):
return 0.
return x
pos = GaiaPosition(g.ra, g.dec, g.ref_epoch,
nantozero(g.pmra),
nantozero(g.pmdec),
nantozero(g.parallax))
        # initialize from decam_mag_<band> if available, otherwise Gaia G.
fluxes = {}
for band in bands:
try:
mag = g.get('decam_mag_%s' % band)
except KeyError:
mag = g.phot_g_mean_mag
fluxes[band] = NanoMaggies.magToNanomaggies(mag)
bright = NanoMaggies(order=bands, **fluxes)
src = cls(pos, bright)
src.forced_point_source = g.pointsource
src.reference_star = getattr(g, 'isgaia', False) or getattr(g, 'isbright', False)
return src
#
# We need a subclass of the standand WCS class to handle moving sources.
#
class LegacySurveyWcs(ConstantFitsWcs):
def __init__(self, wcs, tai):
super(LegacySurveyWcs, self).__init__(wcs)
self.tai = tai
def copy(self):
return LegacySurveyWcs(self.wcs, self.tai)
def positionToPixel(self, pos, src=None):
if isinstance(pos, GaiaPosition):
pos = pos.getPositionAtTime(tai_to_mjd(self.tai.getValue()))
return super(LegacySurveyWcs, self).positionToPixel(pos, src=src)
class LegacyEllipseWithPriors(EllipseWithPriors):
# Prior on (softened) ellipticity: Gaussian with this standard deviation
ellipticityStd = 0.25
from tractor.sersic import SersicIndex
class LegacySersicIndex(SersicIndex):
def __init__(self, val=0):
super(LegacySersicIndex, self).__init__(val=val)
self.lower = 0.5
self.upper = 6.0
class LogRadius(EllipseESoft):
''' Class used during fitting of the RexGalaxy type -- an ellipse
type where only the radius is variable, and is represented in log
space.'''
def __init__(self, *args, **kwargs):
super(LogRadius, self).__init__(*args, **kwargs)
self.lowers = [None]
# MAGIC -- 10" default max r_e!
# SEE ALSO utils.py : class(EllipseWithPriors)!
self.uppers = [np.log(10.)]
self.lowers = [np.log(galaxy_min_re)]
def isLegal(self):
return ((self.logre <= self.uppers[0]) and
(self.logre >= self.lowers[0]))
def setMaxLogRadius(self, rmax):
self.uppers[0] = rmax
def getMaxLogRadius(self):
return self.uppers[0]
@staticmethod
def getName():
return 'LogRadius'
@staticmethod
def getNamedParams():
# log r: log of effective radius in arcsec
return dict(logre=0)
def __repr__(self):
return 'log r_e=%g' % (self.logre)
@property
def theta(self):
return 0.
@property
def e(self):
return 0.
class RexGalaxy(ExpGalaxy):
'''This defines the 'REX' galaxy profile -- an exponential profile
that is round (zero ellipticity) with variable radius.
It is used to measure marginally-resolved galaxies.
The real action (what makes it a Rex) happens when it is constructed,
via, eg,
rex = RexGalaxy(position, brightness, LogRadius(0.))
(which happens in oneblob.py)
'''
def __init__(self, *args):
super(RexGalaxy, self).__init__(*args)
def getName(self):
return 'RexGalaxy'
class SimpleGalaxy(ExpGalaxy):
'''This defines the 'SIMP' galaxy profile -- an exponential profile
    with a fixed round shape (zero ellipticity) and a fixed 0.45 arcsec
    effective radius. It is used to detect marginally-resolved galaxies.
'''
shape = EllipseE(0.45, 0., 0.)
def __init__(self, *args):
super(SimpleGalaxy, self).__init__(*args)
self.shape = SimpleGalaxy.shape
def __str__(self):
return (self.name + ' at ' + str(self.pos)
+ ' with ' + str(self.brightness))
def __repr__(self):
return (self.name + '(pos=' + repr(self.pos) +
', brightness=' + repr(self.brightness) + ')')
@staticmethod
def getNamedParams():
return dict(pos=0, brightness=1)
def getName(self):
return 'SimpleGalaxy'
### HACK -- for Galaxy.getParamDerivatives()
def isParamFrozen(self, pname):
if pname == 'shape':
return True
return super(SimpleGalaxy, self).isParamFrozen(pname)
class BrickDuck(object):
'''A little duck-typing class when running on a custom RA,Dec center
rather than a brick center.
'''
def __init__(self, ra, dec, brickname):
self.ra = ra
self.dec = dec
self.brickname = brickname
self.brickid = -1
def get_git_version(dirnm=None):
'''
Runs 'git describe' in the current directory (or given dir) and
returns the result as a string.
Parameters
----------
dirnm : string
If non-None, "cd" to the given directory before running 'git describe'
Returns
-------
Git version string
'''
from astrometry.util.run_command import run_command
cmd = ''
if dirnm is None:
# Get the git version of the legacypipe product
import legacypipe
dirnm = os.path.dirname(legacypipe.__file__)
cmd = "cd '%s' && git describe" % dirnm
rtn,version,err = run_command(cmd)
if rtn:
raise RuntimeError('Failed to get version string (%s): ' % cmd +
version + err)
version = version.strip()
return version
def get_version_header(program_name, survey_dir, release, git_version=None):
'''
Creates a fitsio header describing a DECaLS data product.
'''
import datetime
if program_name is None:
import sys
program_name = sys.argv[0]
if git_version is None:
git_version = get_git_version()
hdr = fitsio.FITSHDR()
#lsdir_prefix1 = '/global/project/projectdirs'
#lsdir_prefix2 = '/global/projecta/projectdirs'
for s in [
'Data product of the DESI Imaging Legacy Survey (DECaLS)',
'Full documentation at http://legacysurvey.org',
]:
hdr.add_record(dict(name='COMMENT', value=s, comment=s))
hdr.add_record(dict(name='LEGPIPEV', value=git_version,
comment='legacypipe git version'))
#hdr.add_record(dict(name='LSDIRPFX', value=lsdir_prefix1,
# comment='LegacySurveys Directory Prefix'))
hdr.add_record(dict(name='LSDIR', value=survey_dir,
comment='$LEGACY_SURVEY_DIR directory'))
hdr.add_record(dict(name='LSDR', value='DR9',
comment='Data release number'))
hdr.add_record(dict(name='RUNDATE', value=datetime.datetime.now().isoformat(),
comment='%s run time' % program_name))
hdr.add_record(dict(name='SURVEY', value='DECaLS+BASS+MzLS',
comment='The LegacySurveys'))
# Requested by NOAO
hdr.add_record(dict(name='SURVEYID', value='DECaLS BASS MzLS',
comment='Survey names'))
#hdr.add_record(dict(name='SURVEYID', value='DECam Legacy Survey (DECaLS)',
#hdr.add_record(dict(name='SURVEYID', value='BASS MzLS',
hdr.add_record(dict(name='DRVERSIO', value=release,
comment='LegacySurveys Data Release number'))
hdr.add_record(dict(name='OBSTYPE', value='object',
comment='Observation type'))
hdr.add_record(dict(name='PROCTYPE', value='tile',
comment='Processing type'))
import socket
hdr.add_record(dict(name='NODENAME', value=socket.gethostname(),
comment='Machine where script was run'))
#hdr.add_record(dict(name='HOSTFQDN', value=socket.getfqdn(),comment='Machine where script was run'))
hdr.add_record(dict(name='HOSTNAME', value=os.environ.get('NERSC_HOST', 'none'),
comment='NERSC machine where script was run'))
hdr.add_record(dict(name='JOB_ID', value=os.environ.get('SLURM_JOB_ID', 'none'),
comment='SLURM job id'))
hdr.add_record(dict(name='ARRAY_ID', value=os.environ.get('ARRAY_TASK_ID', 'none'),
comment='SLURM job array id'))
return hdr
def get_dependency_versions(unwise_dir, unwise_tr_dir, unwise_modelsky_dir):
import astrometry
import astropy
import matplotlib
try:
import mkl_fft
except:
mkl_fft = None
import photutils
import tractor
import scipy
import unwise_psf
depvers = []
headers = []
default_ver = 'UNAVAILABLE'
for name,pkg in [('astrometry', astrometry),
('astropy', astropy),
('fitsio', fitsio),
('matplotlib', matplotlib),
('mkl_fft', mkl_fft),
('numpy', np),
('photutils', photutils),
('scipy', scipy),
('tractor', tractor),
('unwise_psf', unwise_psf),
]:
if pkg is None:
depvers.append((name, 'none'))
continue
#depvers.append((name + '-path', os.path.dirname(pkg.__file__)))
try:
depvers.append((name, pkg.__version__))
except:
try:
depvers.append((name,
get_git_version(os.path.dirname(pkg.__file__))))
except:
pass
# Get additional paths from environment variables
dep = 'LARGEGALAXIES_CAT'
value = os.environ.get(dep, default_ver)
if value == default_ver:
print('Warning: failed to get version string for "%s"' % dep)
else:
depvers.append((dep, value))
if os.path.exists(value):
from legacypipe.reference import get_large_galaxy_version
ver,preburn = get_large_galaxy_version(value)
depvers.append(('LARGEGALAXIES_VER', ver))
depvers.append(('LARGEGALAXIES_PREBURN', preburn))
for dep in ['TYCHO2_KD', 'GAIA_CAT']:
value = os.environ.get('%s_DIR' % dep, default_ver)
if value == default_ver:
print('Warning: failed to get version string for "%s"' % dep)
else:
depvers.append((dep, value))
if unwise_dir is not None:
dirs = unwise_dir.split(':')
depvers.append(('unwise', unwise_dir))
for i,d in enumerate(dirs):
headers.append(('UNWISD%i' % (i+1), d, ''))
if unwise_tr_dir is not None:
depvers.append(('unwise_tr', unwise_tr_dir))
# this is assumed to be only a single directory
headers.append(('UNWISTD', unwise_tr_dir, ''))
if unwise_modelsky_dir is not None:
depvers.append(('unwise_modelsky', unwise_modelsky_dir))
# this is assumed to be only a single directory
headers.append(('UNWISSKY', unwise_modelsky_dir, ''))
for i,(name,value) in enumerate(depvers):
headers.append(('DEPNAM%02i' % i, name, ''))
headers.append(('DEPVER%02i' % i, value, ''))
return headers
class MyFITSHDR(fitsio.FITSHDR):
'''
This is copied straight from fitsio, simply removing "BUNIT" from
the list of headers to remove. This is required to format the
tractor catalogs the way we want them.
'''
def clean(self, **kwargs):
"""
Remove reserved keywords from the header.
These are keywords that the fits writer must write in order
to maintain consistency between header and data.
"""
rmnames = ['SIMPLE','EXTEND','XTENSION','BITPIX','PCOUNT','GCOUNT',
'THEAP',
'EXTNAME',
#'BUNIT',
'BSCALE','BZERO','BLANK',
'ZQUANTIZ','ZDITHER0','ZIMAGE','ZCMPTYPE',
'ZSIMPLE','ZTENSION','ZPCOUNT','ZGCOUNT',
'ZBITPIX','ZEXTEND',
#'FZTILELN','FZALGOR',
'CHECKSUM','DATASUM']
self.delete(rmnames)
r = self._record_map.get('NAXIS',None)
if r is not None:
naxis = int(r['value'])
self.delete('NAXIS')
rmnames = ['NAXIS%d' % i for i in xrange(1,naxis+1)]
self.delete(rmnames)
r = self._record_map.get('ZNAXIS',None)
self.delete('ZNAXIS')
if r is not None:
znaxis = int(r['value'])
rmnames = ['ZNAXIS%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
rmnames = ['ZTILE%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
rmnames = ['ZNAME%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
rmnames = ['ZVAL%d' % i for i in xrange(1,znaxis+1)]
self.delete(rmnames)
r = self._record_map.get('TFIELDS',None)
if r is not None:
tfields = int(r['value'])
self.delete('TFIELDS')
if tfields > 0:
nbase = ['TFORM','TTYPE','TDIM','TUNIT','TSCAL','TZERO',
'TNULL','TDISP','TDMIN','TDMAX','TDESC','TROTA',
'TRPIX','TRVAL','TDELT','TCUNI',
#'FZALG'
]
for i in xrange(1,tfields+1):
names=['%s%d' % (n,i) for n in nbase]
self.delete(names)
def tim_get_resamp(tim, targetwcs):
from astrometry.util.resample import resample_with_wcs,OverlapError
if hasattr(tim, 'resamp'):
return tim.resamp
try:
Yo,Xo,Yi,Xi,_ = resample_with_wcs(targetwcs, tim.subwcs, intType=np.int16)
except OverlapError:
print('No overlap')
return None
if len(Yo) == 0:
return None
return Yo,Xo,Yi,Xi
def sdss_rgb(imgs, bands, scales=None, m=0.03, Q=20, mnmx=None):
rgbscales=dict(g=(2, 6.0),
r=(1, 3.4),
i=(0, 3.0),
z=(0, 2.2))
# rgbscales = {'u': 1.5, #1.0,
# 'g': 2.5,
# 'r': 1.5,
# 'i': 1.0,
# 'z': 0.4, #0.3
# }
if scales is not None:
rgbscales.update(scales)
I = 0
for img,band in zip(imgs, bands):
plane,scale = rgbscales[band]
img = np.maximum(0, img * scale + m)
I = I + img
I /= len(bands)
if Q is not None:
fI = np.arcsinh(Q * I) / np.sqrt(Q)
I += (I == 0.) * 1e-6
I = fI / I
H,W = I.shape
rgb = np.zeros((H,W,3), np.float32)
for img,band in zip(imgs, bands):
plane,scale = rgbscales[band]
if mnmx is None:
rgb[:,:,plane] = np.clip((img * scale + m) * I, 0, 1)
else:
mn,mx = mnmx
rgb[:,:,plane] = np.clip(((img * scale + m) - mn) / (mx - mn), 0, 1)
return rgb
def get_rgb(imgs, bands,
resids=False, mnmx=None, arcsinh=None):
'''
Given a list of images in the given bands, returns a scaled RGB
image.
*imgs* a list of numpy arrays, all the same size, in nanomaggies
*bands* a list of strings, eg, ['g','r','z']
*mnmx* = (min,max), values that will become black/white *after* scaling.
Default is (-3,10)
*arcsinh* use nonlinear scaling as in SDSS
*scales*
Returns a (H,W,3) numpy array with values between 0 and 1.
'''
# (ignore arcsinh...)
if resids:
mnmx = (-0.1, 0.1)
if mnmx is not None:
return sdss_rgb(imgs, bands, m=0., Q=None, mnmx=mnmx)
return sdss_rgb(imgs, bands)
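def _get_rgb_example():
    """Hedged usage sketch (illustration only): builds an RGB composite from
    three small random g,r,z images using the default scalings above; the
    result has shape (H, W, 3) with values clipped to [0, 1].
    """
    H, W = 32, 32
    imgs = [np.random.uniform(0., 0.1, size=(H, W)) for _ in 'grz']
    return get_rgb(imgs, ['g', 'r', 'z'])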
def wcs_for_brick(b, W=3600, H=3600, pixscale=0.262):
'''
Returns an astrometry.net style Tan WCS object for a given brick object.
b: row from survey-bricks.fits file
W,H: size in pixels
pixscale: pixel scale in arcsec/pixel.
Returns: Tan wcs object
'''
from astrometry.util.util import Tan
pixscale = pixscale / 3600.
return Tan(b.ra, b.dec, W/2.+0.5, H/2.+0.5,
-pixscale, 0., 0., pixscale,
float(W), float(H))
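def _wcs_for_brick_example():
    """Hedged usage sketch (illustration only): builds the WCS for a custom
    brick centered at RA,Dec = (180, 10) via the BrickDuck helper above and
    reads back the pixel scale in arcsec/pixel.
    """
    brick = BrickDuck(180., 10., 'custom-180p10')
    wcs = wcs_for_brick(brick, W=3600, H=3600, pixscale=0.262)
    return wcs.pixel_scale()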
def bricks_touching_wcs(targetwcs, survey=None, B=None, margin=20):
'''
Finds LegacySurvey bricks touching a given WCS header object.
Parameters
----------
targetwcs : astrometry.util.Tan object or similar
The region of sky to search
survey : legacypipe.survey.LegacySurveyData object
From which the brick table will be retrieved
B : FITS table
The table of brick objects to search
margin : int
Margin in pixels around the outside of the WCS
Returns
-------
A table (subset of B, if given) containing the bricks touching the
given WCS region + margin.
'''
from astrometry.libkd.spherematch import match_radec
from astrometry.util.miscutils import clip_wcs
if B is None:
assert(survey is not None)
B = survey.get_bricks_readonly()
ra,dec = targetwcs.radec_center()
radius = targetwcs.radius()
# MAGIC 0.4 degree search radius =
# DECam hypot(1024,2048)*0.27/3600 + Brick hypot(0.25, 0.25) ~= 0.35 + margin
I,_,_ = match_radec(B.ra, B.dec, ra, dec,
radius + np.hypot(0.25,0.25)/2. + 0.05)
debug(len(I), 'bricks nearby')
keep = []
for i in I:
b = B[i]
brickwcs = wcs_for_brick(b)
clip = clip_wcs(targetwcs, brickwcs)
if len(clip) == 0:
debug('No overlap with brick', b.brickname)
continue
keep.append(i)
return B[np.array(keep)]
def ccds_touching_wcs(targetwcs, ccds, ccdrad=None, polygons=True):
'''
targetwcs: wcs object describing region of interest
ccds: fits_table object of CCDs
ccdrad: radius of CCDs, in degrees.
If None (the default), compute from the CCDs table.
(0.17 for DECam)
Returns: index array I of CCDs within range.
'''
from astrometry.util.util import Tan
from astrometry.util.miscutils import polygons_intersect
from astrometry.util.starutil_numpy import degrees_between
trad = targetwcs.radius()
if ccdrad is None:
ccdrad = max(np.sqrt(np.abs(ccds.cd1_1 * ccds.cd2_2 -
ccds.cd1_2 * ccds.cd2_1)) *
np.hypot(ccds.width, ccds.height) / 2.)
rad = trad + ccdrad
r,d = targetwcs.radec_center()
I, = np.where(np.abs(ccds.dec - d) < rad)
I = I[np.where(degrees_between(r, d, ccds.ra[I], ccds.dec[I]) < rad)[0]]
if not polygons:
return I
# now check actual polygon intersection
tw,th = targetwcs.imagew, targetwcs.imageh
targetpoly = [(0.5,0.5),(tw+0.5,0.5),(tw+0.5,th+0.5),(0.5,th+0.5)]
cd = targetwcs.get_cd()
tdet = cd[0]*cd[3] - cd[1]*cd[2]
if tdet > 0:
targetpoly = list(reversed(targetpoly))
targetpoly = np.array(targetpoly)
keep = []
for i in I:
W,H = ccds.width[i],ccds.height[i]
wcs = Tan(*[float(x) for x in
[ccds.crval1[i], ccds.crval2[i], ccds.crpix1[i], ccds.crpix2[i],
ccds.cd1_1[i], ccds.cd1_2[i], ccds.cd2_1[i], ccds.cd2_2[i], W, H]])
cd = wcs.get_cd()
wdet = cd[0]*cd[3] - cd[1]*cd[2]
poly = []
for x,y in [(0.5,0.5),(W+0.5,0.5),(W+0.5,H+0.5),(0.5,H+0.5)]:
rr,dd = wcs.pixelxy2radec(x,y)
ok,xx,yy = targetwcs.radec2pixelxy(rr,dd)
poly.append((xx,yy))
if wdet > 0:
poly = list(reversed(poly))
poly = np.array(poly)
if polygons_intersect(targetpoly, poly):
keep.append(i)
I = np.array(keep)
return I
def create_temp(**kwargs):
f,fn = tempfile.mkstemp(dir=tempdir, **kwargs)
os.close(f)
os.unlink(fn)
return fn
def imsave_jpeg(jpegfn, img, **kwargs):
    '''Saves an image in JPEG format. Some matplotlib installations
don't support jpeg, so we optionally write to PNG and then convert
to JPEG using the venerable netpbm tools.
*jpegfn*: JPEG filename
*img*: image, in the typical matplotlib formats (see plt.imsave)
'''
import pylab as plt
if True:
kwargs.update(format='jpg')
plt.imsave(jpegfn, img, **kwargs)
else:
tmpfn = create_temp(suffix='.png')
plt.imsave(tmpfn, img, **kwargs)
cmd = ('pngtopnm %s | pnmtojpeg -quality 90 > %s' % (tmpfn, jpegfn))
rtn = os.system(cmd)
print(cmd, '->', rtn)
os.unlink(tmpfn)
class LegacySurveyData(object):
'''
A class describing the contents of a LEGACY_SURVEY_DIR directory --
tables of CCDs and of bricks, and calibration data. Methods for
dealing with the CCDs and bricks tables.
This class is also responsible for creating LegacySurveyImage
objects (eg, DecamImage objects), which then allow data to be read
from disk.
'''
def __init__(self, survey_dir=None, cache_dir=None, output_dir=None,
allbands='grz'):
'''Create a LegacySurveyData object using data from the given
*survey_dir* directory, or from the $LEGACY_SURVEY_DIR environment
variable.
Parameters
----------
survey_dir : string
Defaults to $LEGACY_SURVEY_DIR environment variable. Where to look for
files including calibration files, tables of CCDs and bricks, image data,
etc.
cache_dir : string
Directory to search for input files before looking in survey_dir. Useful
for, eg, Burst Buffer.
output_dir : string
Base directory for output files; default ".".
'''
from legacypipe.decam import DecamImage
from legacypipe.mosaic import MosaicImage
from legacypipe.bok import BokImage
from legacypipe.ptf import PtfImage
from legacypipe.cfht import MegaPrimeImage
from collections import OrderedDict
if survey_dir is None:
survey_dir = os.environ.get('LEGACY_SURVEY_DIR')
if survey_dir is None:
print('Warning: you should set the $LEGACY_SURVEY_DIR environment variable.')
print('Using the current directory as LEGACY_SURVEY_DIR.')
survey_dir = os.getcwd()
self.survey_dir = survey_dir
self.cache_dir = cache_dir
if output_dir is None:
self.output_dir = '.'
else:
self.output_dir = output_dir
self.output_file_hashes = OrderedDict()
self.ccds = None
self.bricks = None
self.ccds_index = None
# Create and cache a kd-tree for bricks_touching_radec_box ?
self.cache_tree = False
self.bricktree = None
### HACK! Hard-coded brick edge size, in degrees!
self.bricksize = 0.25
# Cached CCD kd-tree --
# - initially None, then a list of (fn, kd)
self.ccd_kdtrees = None
self.image_typemap = {
'decam' : DecamImage,
'decam+noise' : DecamImage,
'mosaic' : MosaicImage,
'mosaic3': MosaicImage,
'90prime': BokImage,
'ptf' : PtfImage,
'megaprime': MegaPrimeImage,
}
self.allbands = allbands
# Filename prefix for coadd files
self.file_prefix = 'legacysurvey'
def __str__(self):
return ('%s: dir %s, out %s' %
(type(self).__name__, self.survey_dir, self.output_dir))
def get_default_release(self):
return None
def ccds_for_fitting(self, brick, ccds):
# By default, use all.
return None
def image_class_for_camera(self, camera):
# Assert that we have correctly removed trailing spaces
assert(camera == camera.strip())
return self.image_typemap[camera]
def sed_matched_filters(self, bands):
from legacypipe.detection import sed_matched_filters
return sed_matched_filters(bands)
def index_of_band(self, b):
return self.allbands.index(b)
def read_intermediate_catalog(self, brick, **kwargs):
'''
Reads the intermediate tractor catalog for the given brickname.
*kwargs*: passed to self.find_file()
Returns (T, hdr, primhdr)
'''
fn = self.find_file('tractor-intermediate', brick=brick, **kwargs)
T = fits_table(fn)
hdr = T.get_header()
primhdr = fitsio.read_header(fn)
in_flux_prefix = ''
# Ensure flux arrays are 2d (N x 1)
keys = ['flux', 'flux_ivar', 'rchi2', 'fracflux', 'fracmasked',
'fracin', 'nobs', 'anymask', 'allmask', 'psfsize', 'depth',
'galdepth']
for k in keys:
incol = '%s%s' % (in_flux_prefix, k)
X = T.get(incol)
# Hmm, if we need to reshape one of these arrays, we will
# need to do all of them.
if len(X.shape) == 1:
X = X[:, np.newaxis]
T.set(incol, X)
return T, hdr, primhdr
def find_file(self, filetype, brick=None, brickpre=None, band='%(band)s',
camera=None, expnum=None, ccdname=None,
output=False, **kwargs):
'''
Returns the filename of a Legacy Survey file.
*filetype* : string, type of file to find, including:
"tractor" -- Tractor catalogs
"depth" -- PSF depth maps
"galdepth" -- Canonical galaxy depth maps
"nexp" -- number-of-exposure maps
*brick* : string, brick name such as "0001p000"
*output*: True if we are about to write this file; will use self.output_dir as
the base directory rather than self.survey_dir.
Returns: path to the specified file (whether or not it exists).
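
Example (the brick name is hypothetical)::

    fn = survey.find_file('tractor', brick='1234p567')
    # -> <survey_dir>/tractor/123/tractor-1234p567.fits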
'''
from glob import glob
if brick is None:
brick = '%(brick)s'
brickpre = '%(brick).3s'
else:
brickpre = brick[:3]
if output:
basedir = self.output_dir
else:
basedir = self.survey_dir
if brick is not None:
codir = os.path.join(basedir, 'coadd', brickpre, brick)
# Swap in files in the self.cache_dir, if they exist.
def swap(fn):
if output:
return fn
return self.check_cache(fn)
def swaplist(fns):
if output or self.cache_dir is None:
return fns
return [self.check_cache(fn) for fn in fns]
sname = self.file_prefix
if filetype == 'bricks':
return swap(os.path.join(basedir, 'survey-bricks.fits.gz'))
elif filetype == 'ccds':
return swaplist(
glob(os.path.join(basedir, 'survey-ccds*.fits.gz')))
elif filetype == 'ccd-kds':
return swaplist(
glob(os.path.join(basedir, 'survey-ccds*.kd.fits')))
elif filetype == 'tycho2':
dirnm = os.environ.get('TYCHO2_KD_DIR')
if dirnm is not None:
fn = os.path.join(dirnm, 'tycho2.kd.fits')
if os.path.exists(fn):
return fn
return swap(os.path.join(basedir, 'tycho2.kd.fits'))
elif filetype == 'large-galaxies':
fn = os.environ.get('LARGEGALAXIES_CAT')
if fn is not None:
if os.path.isfile(fn):
return fn
pat = os.path.join(basedir, 'LSLGA-*.kd.fits')
fns = glob(pat)
if len(fns) == 0:
return None
if len(fns) > 1:
print('More than one filename matched large-galaxy pattern', pat,
'; using the first one,', fns[0])
return fns[0]
elif filetype == 'annotated-ccds':
return swaplist(
glob(os.path.join(basedir, 'ccds-annotated-*.fits.gz')))
elif filetype == 'tractor':
return swap(os.path.join(basedir, 'tractor', brickpre,
'tractor-%s.fits' % brick))
elif filetype == 'tractor-intermediate':
return swap(os.path.join(basedir, 'tractor-i', brickpre,
'tractor-%s.fits' % brick))
elif filetype == 'galaxy-sims':
return swap(os.path.join(basedir, 'tractor', brickpre,
'galaxy-sims-%s.fits' % brick))
elif filetype in ['ccds-table', 'depth-table']:
ty = filetype.split('-')[0]
return swap(
os.path.join(codir, '%s-%s-%s.fits' % (sname, brick, ty)))
elif filetype in ['image-jpeg', 'model-jpeg', 'resid-jpeg',
'blobmodel-jpeg',
'imageblob-jpeg', 'simscoadd-jpeg','imagecoadd-jpeg',
'wise-jpeg', 'wisemodel-jpeg']:
ty = filetype.split('-')[0]
return swap(
os.path.join(codir, '%s-%s-%s.jpg' % (sname, brick, ty)))
elif filetype in ['outliers-pre', 'outliers-post',
'outliers-masked-pos', 'outliers-masked-neg']:
return swap(
os.path.join(basedir, 'metrics', brickpre,
'%s-%s.jpg' % (filetype, brick)))
elif filetype in ['invvar', 'chi2', 'image', 'model', 'blobmodel',
'depth', 'galdepth', 'nexp', 'psfsize',
'copsf']:
return swap(os.path.join(codir, '%s-%s-%s-%s.fits.fz' %
(sname, brick, filetype, band)))
elif filetype in ['blobmap']:
return swap(os.path.join(basedir, 'metrics', brickpre,
'blobs-%s.fits.gz' % (brick)))
elif filetype in ['maskbits']:
return swap(os.path.join(codir,
'%s-%s-%s.fits.fz' % (sname, brick, filetype)))
elif filetype in ['all-models']:
return swap(os.path.join(basedir, 'metrics', brickpre,
'all-models-%s.fits' % (brick)))
elif filetype == 'ref-sources':
return swap(os.path.join(basedir, 'metrics', brickpre,
'reference-%s.fits' % (brick)))
elif filetype == 'checksums':
return swap(os.path.join(basedir, 'tractor', brickpre,
'brick-%s.sha256sum' % brick))
elif filetype == 'outliers_mask':
return swap(os.path.join(basedir, 'metrics', brickpre,
'outlier-mask-%s.fits.fz' % (brick)))
raise ValueError('Unknown filetype "%s"' % filetype)
def check_cache(self, fn):
if self.cache_dir is None:
return fn
cfn = fn.replace(self.survey_dir, self.cache_dir)
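# e.g. (hypothetical paths) /survey/coadd/000/... -> /cache/coadd/000/...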
print('checking for cache fn', cfn)
if os.path.exists(cfn):
debug('Cached file hit:', fn, '->', cfn)
return cfn
debug('Cached file miss:', fn, '-/->', cfn)
return fn
def get_compression_string(self, filetype, shape=None, **kwargs):
pat = dict(# g: sigma ~ 0.002. qz -1e-3: 6 MB, -1e-4: 10 MB
image = '[compress R %(tilew)i,%(tileh)i; qz -1e-4]',
# g: qz -1e-3: 2 MB, -1e-4: 2.75 MB
model = '[compress R %(tilew)i,%(tileh)i; qz -1e-4]',
chi2 = '[compress R %(tilew)i,%(tileh)i; qz -0.1]',
# qz +8: 9 MB, qz +16: 10.5 MB
invvar = '[compress R %(tilew)i,%(tileh)i; qz 16]',
nexp = '[compress H %(tilew)i,%(tileh)i]',
maskbits = '[compress H %(tilew)i,%(tileh)i]',
depth = '[compress G %(tilew)i,%(tileh)i; qz 0]',
galdepth = '[compress G %(tilew)i,%(tileh)i; qz 0]',
psfsize = '[compress G %(tilew)i,%(tileh)i; qz 0]',
outliers_mask = '[compress G]',
).get(filetype)
#outliers_mask = '[compress H %i,%i]',
if pat is None:
return pat
# Tile compression size
tilew,tileh = 100,100
if shape is not None:
H,W = shape
# CFITSIO's fpack compression can't handle partial tile
# sizes < 4 pix. Select a tile size that works, or don't
# compress if we can't find one.
if W < 4 or H < 4:
return None
while tilew <= W:
remain = W % tilew
if remain == 0 or remain >= 4:
break
tilew += 1
while tileh <= H:
remain = H % tileh
if remain == 0 or remain >= 4:
break
tileh += 1
return pat % dict(tilew=tilew,tileh=tileh)
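# For example (hypothetical shape): get_compression_string('image', shape=(3600, 3600))
# returns '[compress R 100,100; qz -1e-4]', since 3600 is an exact multiple of the
# default 100x100 tile size.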
def write_output(self, filetype, hashsum=True, **kwargs):
'''
Returns a context manager for writing an output file.
Example use: ::
with survey.write_output('ccds', brick=brickname) as out:
ccds.writeto(out.fn, primheader=primhdr)
For FITS output, out.fits is a fitsio.FITS object. The file
contents will actually be written in memory, and then a
sha256sum computed before the file contents are written out to
the real disk file. The 'out.fn' member variable is NOT set.
::
with survey.write_output('ccds', brick=brickname) as out:
ccds.writeto(None, fits_object=out.fits, primheader=primhdr)
Does the following on entry:
- calls self.find_file() to determine which filename to write to
- ensures the output directory exists
- writes to a temporary file (the filename with a "tmp-" prefix)
Does the following on exit:
- renames the temporary file to the final filename (to make it atomic)
- computes the sha256sum
'''
class OutputFileContext(object):
def __init__(self, fn, survey, hashsum=True, relative_fn=None,
compression=None):
'''
*compression*: a CFITSIO compression specification, eg:
"[compress R 100,100; qz -0.05]"
'''
self.real_fn = fn
self.relative_fn = relative_fn
self.survey = survey
self.is_fits = (fn.endswith('.fits') or
fn.endswith('.fits.gz') or
fn.endswith('.fits.fz'))
self.tmpfn = os.path.join(os.path.dirname(fn),
'tmp-'+os.path.basename(fn))
if self.is_fits:
self.fits = fitsio.FITS('mem://' + (compression or ''),
'rw')
else:
self.fn = self.tmpfn
self.hashsum = hashsum
def __enter__(self):
dirnm = os.path.dirname(self.tmpfn)
trymakedirs(dirnm)
return self
def __exit__(self, exc_type, exc_value, traceback):
# If an exception was thrown, bail out
if exc_type is not None:
return
if self.hashsum:
import hashlib
hashfunc = hashlib.sha256
sha = hashfunc()
if self.is_fits:
# Read back the data written into memory by the
# fitsio library
rawdata = self.fits.read_raw()
# close the fitsio file
self.fits.close()
# If gzip, we now have to actually do the
# compression to gzip format...
if self.tmpfn.endswith('.gz'):
from io import BytesIO
import gzip
#ulength = len(rawdata)
# We gzip to a memory file (BytesIO) so we can compute
# the hashcode before writing to disk for real
gzipped = BytesIO()
gzf = gzip.GzipFile(self.real_fn, 'wb', 9, gzipped)
gzf.write(rawdata)
gzf.close()
rawdata = gzipped.getvalue()
gzipped.close()
del gzipped
#clength = len(rawdata)
#print('Gzipped', ulength, 'to', clength)
if self.hashsum:
sha.update(rawdata)
f = open(self.tmpfn, 'wb')
f.write(rawdata)
f.close()
debug('Wrote', self.tmpfn)
del rawdata
else:
f = open(self.tmpfn, 'rb')
if self.hashsum:
sha.update(f.read())
f.close()
if self.hashsum:
hashcode = sha.hexdigest()
del sha
os.rename(self.tmpfn, self.real_fn)
debug('Renamed to', self.real_fn)
info('Wrote', self.real_fn)
if self.hashsum:
# List the relative filename (from output dir) in
# shasum file.
fn = self.relative_fn or self.real_fn
self.survey.add_hashcode(fn, hashcode)
# end of OutputFileContext class
# Get the output filename for this filetype
fn = self.find_file(filetype, output=True, **kwargs)
compress = self.get_compression_string(filetype, **kwargs)
# Find the relative path (relative to output_dir), which is the string
# we will put in the shasum file.
relfn = fn
if relfn.startswith(self.output_dir):
relfn = relfn[len(self.output_dir):]
if relfn.startswith('/'):
relfn = relfn[1:]
out = OutputFileContext(fn, self, hashsum=hashsum, relative_fn=relfn,
compression=compress)
return out
def add_hashcode(self, fn, hashcode):
'''
Callback to be called in the *write_output* routine.
'''
self.output_file_hashes[fn] = hashcode
def __getstate__(self):
'''
For pickling; we omit cached tables.
'''
d = self.__dict__.copy()
d['ccds'] = None
d['bricks'] = None
d['bricktree'] = None
d['ccd_kdtrees'] = None
return d
def drop_cache(self):
'''
Clears all cached data contained in this object. Useful for
pickling / multiprocessing.
'''
self.ccds = None
self.bricks = None
if self.bricktree is not None:
from astrometry.libkd.spherematch import tree_free
tree_free(self.bricktree)
self.bricktree = None
def get_calib_dir(self):
'''
Returns the directory containing calibration data.
'''
return os.path.join(self.survey_dir, 'calib')
def get_image_dir(self):
'''
Returns the directory containing image data.
'''
return os.path.join(self.survey_dir, 'images')
def get_survey_dir(self):
'''
Returns the base LEGACY_SURVEY_DIR directory.
'''
return self.survey_dir
def get_se_dir(self):
'''
Returns the directory containing SourceExtractor config files,
used during calibration.
'''
from pkg_resources import resource_filename
return resource_filename('legacypipe', 'config')
def get_bricks(self):
'''
Returns a table of bricks. The caller owns the table.
For read-only purposes, see *get_bricks_readonly()*, which
uses a cached version.
'''
return fits_table(self.find_file('bricks'))
def get_bricks_readonly(self):
'''
Returns a read-only (shared) copy of the table of bricks.
'''
if self.bricks is None:
self.bricks = self.get_bricks()
# Assert that bricks are the sizes we think they are.
# ... except for the two poles, which are half-sized
assert(np.all(np.abs((self.bricks.dec2 - self.bricks.dec1)[1:-1] -
self.bricksize) < 1e-3))
return self.bricks
def get_brick(self, brickid):
'''
Returns a brick (as one row in a table) by *brickid* (integer).
'''
B = self.get_bricks_readonly()
I, = np.nonzero(B.brickid == brickid)
if len(I) == 0:
return None
return B[I[0]]
def get_brick_by_name(self, brickname):
'''
Returns a brick (as one row in a table) by name (string).
'''
B = self.get_bricks_readonly()
I, = np.nonzero(np.array([n == brickname for n in B.brickname]))
if len(I) == 0:
return None
return B[I[0]]
def get_bricks_near(self, ra, dec, radius):
'''
Returns a set of bricks near the given RA,Dec and radius (all in degrees).
'''
bricks = self.get_bricks_readonly()
if self.cache_tree:
from astrometry.libkd.spherematch import tree_build_radec, tree_search_radec
# Use kdtree
if self.bricktree is None:
self.bricktree = tree_build_radec(bricks.ra, bricks.dec)
I = tree_search_radec(self.bricktree, ra, dec, radius)
else:
from astrometry.util.starutil_numpy import degrees_between
d = degrees_between(bricks.ra, bricks.dec, ra, dec)
I, = np.nonzero(d < radius)
if len(I) == 0:
return None
return bricks[I]
def bricks_touching_radec_box(self, bricks,
ralo, rahi, declo, dechi):
'''
Returns an index vector of the bricks that touch the given RA,Dec box.
'''
if bricks is None:
bricks = self.get_bricks_readonly()
if self.cache_tree and bricks == self.bricks:
from astrometry.libkd.spherematch import tree_build_radec, tree_search_radec
from astrometry.util.starutil_numpy import degrees_between
# Use kdtree
if self.bricktree is None:
self.bricktree = tree_build_radec(bricks.ra, bricks.dec)
# brick size
radius = np.sqrt(2.)/2. * self.bricksize
# + RA,Dec box size
radius = radius + degrees_between(ralo, declo, rahi, dechi) / 2.
dec = (dechi + declo) / 2.
c = (np.cos(np.deg2rad(rahi)) + np.cos(np.deg2rad(ralo))) / 2.
s = (np.sin(np.deg2rad(rahi)) + np.sin(np.deg2rad(ralo))) / 2.
ra = np.rad2deg(np.arctan2(s, c))
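# (averaging the unit vectors of ralo and rahi gives a box-center RA that
#  behaves correctly across the RA=0 wrap-around)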
J = tree_search_radec(self.bricktree, ra, dec, radius)
I = J[np.nonzero((bricks.ra1[J] <= rahi ) * (bricks.ra2[J] >= ralo) *
(bricks.dec1[J] <= dechi) * (bricks.dec2[J] >= declo))[0]]
return I
if rahi < ralo:
# Wrap-around
debug('In Dec slice:', len(np.flatnonzero((bricks.dec1 <= dechi) *
(bricks.dec2 >= declo))))
debug('Above RAlo=', ralo, ':', len(np.flatnonzero(bricks.ra2 >= ralo)))
debug('Below RAhi=', rahi, ':', len(np.flatnonzero(bricks.ra1 <= rahi)))
debug('In RA slice:', len(np.flatnonzero(np.logical_or(bricks.ra2 >= ralo,
bricks.ra1 <= rahi))))
I, = np.nonzero(np.logical_or(bricks.ra2 >= ralo, bricks.ra1 <= rahi) *
(bricks.dec1 <= dechi) * (bricks.dec2 >= declo))
debug('In RA&Dec slice', len(I))
else:
I, = np.nonzero((bricks.ra1 <= rahi ) * (bricks.ra2 >= ralo) *
(bricks.dec1 <= dechi) * (bricks.dec2 >= declo))
return I
def get_ccds_readonly(self):
'''
Returns a shared copy of the table of CCDs.
'''
if self.ccds is None:
self.ccds = self.get_ccds()
return self.ccds
def filter_ccds_files(self, fns):
'''
When reading the list of CCDs, we find all files named
survey-ccds-\*.fits.gz, then filter that list using this function.
'''
return fns
def filter_ccd_kd_files(self, fns):
return fns
def get_ccds(self, **kwargs):
'''
Returns the table of CCDs.
'''
fns = self.find_file('ccd-kds')
fns = self.filter_ccd_kd_files(fns)
# If 'ccd-kds' files exist, read the CCDs tables from them!
# Otherwise, fall back to survey-ccds-*.fits.gz files.
if len(fns) == 0:
fns = self.find_file('ccds')
fns.sort()
fns = self.filter_ccds_files(fns)
if len(fns) == 0:
print('Failed to find any valid survey-ccds tables')
raise RuntimeError('No survey-ccds files')
TT = []
for fn in fns:
debug('Reading CCDs from', fn)
T = fits_table(fn, **kwargs)
debug('Got', len(T), 'CCDs')
TT.append(T)
if len(TT) > 1:
T = merge_tables(TT, columns='fillzero')
else:
T = TT[0]
debug('Total of', len(T), 'CCDs')
del TT
T = self.cleanup_ccds_table(T)
return T
def get_annotated_ccds(self):
'''
Returns the annotated table of CCDs.
'''
fns = self.find_file('annotated-ccds')
TT = []
for fn in fns:
debug('Reading annotated CCDs from', fn)
T = fits_table(fn)
debug('Got', len(T), 'CCDs')
TT.append(T)
T = merge_tables(TT, columns='fillzero')
debug('Total of', len(T), 'CCDs')
del TT
T = self.cleanup_ccds_table(T)
return T
def cleanup_ccds_table(self, ccds):
# Remove trailing spaces from 'ccdname' column
if 'ccdname' in ccds.columns():
# "N4 " -> "N4"
ccds.ccdname = np.array([s.strip() for s in ccds.ccdname])
# Remove trailing spaces from 'camera' column.
if 'camera' in ccds.columns():
ccds.camera = np.array([c.strip() for c in ccds.camera])
# And 'filter' column
if 'filter' in ccds.columns():
ccds.filter = np.array([f.strip() for f in ccds.filter])
return ccds
def ccds_touching_wcs(self, wcs, **kwargs):
'''
Returns a table of the CCDs touching the given *wcs* region.
'''
kdfns = self.get_ccd_kdtrees()
if len(kdfns):
from astrometry.libkd.spherematch import tree_search_radec
# MAGIC number: we'll search a 1-degree radius for CCDs
# roughly in range, then refine using the
# ccds_touching_wcs() function.
radius = 1.
ra,dec = wcs.radec_center()
TT = []
for fn,kd in kdfns:
I = tree_search_radec(kd, ra, dec, radius)
debug(len(I), 'CCDs within', radius, 'deg of RA,Dec',
'(%.3f, %.3f)' % (ra,dec))
if len(I) == 0:
continue
# Read only the CCD-table rows within range.
TT.append(fits_table(fn, rows=I))
if len(TT) == 0:
return None
ccds = merge_tables(TT, columns='fillzero')
ccds = self.cleanup_ccds_table(ccds)
else:
ccds = self.get_ccds_readonly()
I = ccds_touching_wcs(wcs, ccds, **kwargs)
if len(I) == 0:
return None
return ccds[I]
def get_ccd_kdtrees(self):
# check cache...
if self.ccd_kdtrees is not None:
return self.ccd_kdtrees
fns = self.find_file('ccd-kds')
fns = self.filter_ccd_kd_files(fns)
from astrometry.libkd.spherematch import tree_open
self.ccd_kdtrees = []
for fn in fns:
debug('Opening kd-tree', fn)
kd = tree_open(fn, 'ccds')
self.ccd_kdtrees.append((fn, kd))
return self.ccd_kdtrees
def get_image_object(self, t, **kwargs):
'''
Returns a DecamImage or similar object for one row of the CCDs table.
'''
# get Image subclass
imageType = self.image_class_for_camera(t.camera)
# call Image subclass constructor
return imageType(self, t, **kwargs)
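# Typical use (sketch): build image objects for all CCDs overlapping a WCS:
#   ccds = survey.ccds_touching_wcs(targetwcs)
#   ims = [survey.get_image_object(ccd) for ccd in ccds]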
def get_approx_wcs(self, ccd):
from astrometry.util.util import Tan
W,H = ccd.width,ccd.height
wcs = Tan(*[float(x) for x in
[ccd.crval1, ccd.crval2, ccd.crpix1, ccd.crpix2,
ccd.cd1_1, ccd.cd1_2, ccd.cd2_1, ccd.cd2_2, W, H]])
return wcs
def tims_touching_wcs(self, targetwcs, mp, bands=None,
**kwargs):
'''Creates tractor.Image objects for CCDs touching the given
*targetwcs* region.
mp: multiprocessing object
kwargs are passed to LegacySurveyImage.get_tractor_image() and
may include:
* gaussPsf
* pixPsf
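
Example sketch (the thread count is arbitrary)::

    from astrometry.util.multiproc import multiproc
    mp = multiproc(4)
    tims = survey.tims_touching_wcs(targetwcs, mp, bands='grz', pixPsf=True)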
'''
# Read images
C = self.ccds_touching_wcs(targetwcs)
# Sort by band
if bands is not None:
C.cut(np.array([b in bands for b in C.filter]))
ims = []
for t in C:
debug('Image file', t.image_filename, 'hdu', t.image_hdu)
im = self.get_image_object(t)
ims.append(im)
# Read images, clip to ROI
W,H = targetwcs.get_width(), targetwcs.get_height()
targetrd = np.array([targetwcs.pixelxy2radec(x,y) for x,y in
[(1,1),(W,1),(W,H),(1,H),(1,1)]])
args = [(im, targetrd, kwargs) for im in ims]
tims = mp.map(read_one_tim, args)
return tims
def find_ccds(self, expnum=None, ccdname=None, camera=None):
'''
Returns a table of CCDs matching the given *expnum* (exposure
number, integer), *ccdname* (string), and *camera* (string),
if given.
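
Example (the expnum and ccdname are hypothetical)::

    ccds = survey.find_ccds(expnum=348224, ccdname='N4', camera='decam')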
'''
if expnum is not None:
C = self.try_expnum_kdtree(expnum)
if C is not None:
if len(C) == 0:
return None
if ccdname is not None:
C = C[C.ccdname == ccdname]
if camera is not None:
C = C[C.camera == camera]
return C
if expnum is not None and ccdname is not None:
# use ccds_index
if self.ccds_index is None:
if self.ccds is not None:
C = self.ccds
else:
C = self.get_ccds(columns=['expnum','ccdname'])
self.ccds_index = dict([((e,n),i) for i,(e,n) in
enumerate(zip(C.expnum, C.ccdname))])
row = self.ccds_index[(expnum, ccdname)]
if self.ccds is not None:
return self.ccds[row]
#import numpy as np
#C = self.get_ccds(rows=np.array([row]))
#return C[0]
T = self.get_ccds_readonly()
if expnum is not None:
T = T[T.expnum == expnum]
if ccdname is not None:
T = T[T.ccdname == ccdname]
if camera is not None:
T = T[T.camera == camera]
return T
def try_expnum_kdtree(self, expnum):
'''
# By creating a kd-tree from the 'expnum' column, searches by expnum
# can be sped up:
from astrometry.libkd.spherematch import *
from astrometry.util.fits import fits_table
T=fits_table('/global/cscratch1/sd/dstn/dr7-depthcut/survey-ccds-dr7.kd.fits',
columns=['expnum'])
ekd = tree_build(np.atleast_2d(T.expnum.copy()).T.astype(float),
nleaf=60, bbox=False, split=True)
ekd.set_name('expnum')
ekd.write('ekd.fits')
> fitsgetext -i $CSCRATCH/dr7-depthcut/survey-ccds-dr7.kd.fits -o dr7-%02i -a -M
> fitsgetext -i ekd.fits -o ekd-%02i -a -M
> cat dr7-0* ekd-0[123456] > $CSCRATCH/dr7-depthcut+/survey-ccds-dr7.kd.fits
'''
fns = self.find_file('ccd-kds')
fns = self.filter_ccd_kd_files(fns)
if len(fns) == 0:
return None
from astrometry.libkd.spherematch import tree_open
TT = []
for fn in fns:
debug('Searching', fn)
try:
kd = tree_open(fn, 'expnum')
except:
debug('Failed to open', fn, ':')
import traceback
traceback.print_exc()
continue
if kd is None:
return None
I = kd.search(np.array([expnum]), 0.5, 0, 0)
debug(len(I), 'CCDs with expnum', expnum, 'in', fn)
if len(I) == 0:
continue
# Read only the CCD-table rows within range.
TT.append(fits_table(fn, rows=I))
if len(TT) == 0:
##??
return fits_table()
ccds = merge_tables(TT, columns='fillzero')
ccds = self.cleanup_ccds_table(ccds)
return ccds
def run_calibs(X):
im = X[0]
kwargs = X[1]
noraise = kwargs.pop('noraise', False)
debug('run_calibs for image', im, ':', kwargs)
try:
return im.run_calibs(**kwargs)
except:
print('Exception in run_calibs:', im, kwargs)
import traceback
traceback.print_exc()
if not noraise:
raise
def read_one_tim(X):
(im, targetrd, kwargs) = X
#print('Reading', im)
tim = im.get_tractor_image(radecpoly=targetrd, **kwargs)
return tim
from tractor.psfex import PsfExModel
class SchlegelPsfModel(PsfExModel):
def __init__(self, fn=None, ext=1):
'''
`ext` is ignored.
'''
if fn is not None:
T = fits_table(fn)
T.about()
ims = fitsio.read(fn, ext=2)
print('Eigen-images', ims.shape)
nsch,h,w = ims.shape
hdr = fitsio.read_header(fn)
x0 = 0.
y0 = 0.
xscale = 1. / hdr['XSCALE']
yscale = 1. / hdr['YSCALE']
degree = (T.xexp + T.yexp).max()
self.sampling = 1.
# Reorder the 'ims' to match the way PsfEx sorts its polynomial terms
# number of terms in polynomial
ne = (degree + 1) * (degree + 2) // 2
print('Number of eigen-PSFs required for degree=', degree, 'is', ne)
self.psfbases = np.zeros((ne, h,w))
for d in range(degree + 1):
# x polynomial degree = j
# y polynomial degree = k
for j in range(d+1):
k = d - j
ii = j + (degree+1) * k - (k * (k-1))// 2
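# e.g. for degree=2 this maps (j,k) = (0,0),(1,0),(2,0),(0,1),(1,1),(0,2)
# to ii = 0,1,2,3,4,5, matching the PsfEx term ordering.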
jj = np.flatnonzero((T.xexp == j) * (T.yexp == k))
if len(jj) == 0:
print('Schlegel image for power', j,k, 'not found')
continue
im = ims[jj,:,:]
print('Schlegel image for power', j,k, 'has range', im.min(), im.max(), 'sum', im.sum())
self.psfbases[ii,:,:] = ims[jj,:,:]
self.xscale, self.yscale = xscale, yscale
self.x0,self.y0 = x0,y0
self.degree = degree
print('SchlegelPsfEx degree:', self.degree)
bh,bw = self.psfbases[0].shape
self.radius = (bh+1)/2.