repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
gustfrontar/LETKF_WRF | scale_breeding/python/plot_bv.py | 1 | 3518 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 18:45:15 2016
@author:
"""
# Reading and plotting radar data (GVAR-SMN binary format)
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import binary_io as bio
import bred_vector_functions as bvf
import os
basedir='/home/jruiz/share/exp/'
expname = '/breeding_osaka_pawr_1km_bip5_global_1000m_UVT/'
plotbasedir=basedir + expname + '/plots/'
nbredvector=1 #Total number of bred vectors.
inibv=1 #Initial bred vector to plot.
endbv=1 #Final bred vector to plot.
nbipiter=1 #Total number of iterations if breeding in place is activated.
iniit=1 #Initial iter to plot.
endit=1 #End iter to plot.
undef_in=1.0e20
undef_out=np.nan
plotlevels=np.array([6,13,17]) #Which levels will be plotted.
plotvars=['U','V','W','T','QV','QHYD'] #Which variables will be plotted.
#Create the plotbasedir
if not os.path.exists(plotbasedir):
os.mkdir(plotbasedir)
#Define initial and end times using the datetime module.
itime = dt.datetime(2013,7,13,5,10,30) #Initial time.
etime = dt.datetime(2013,7,13,5,39,30) #End time.
#Define the delta.
delta=dt.timedelta(seconds=30)
nx=180
ny=180
nz=20
data_pp_o=dict()
data_pn_o=dict()
data_pp_r=dict()
data_pn_r=dict()
bv_o=dict()
bv_r=dict()
ctime=itime + delta
#Get lat lon.
lat=bio.read_data_direct(basedir + expname + '/latlon/lat_d01z001.grd',nx,ny,1,'>f4')[:,:,0]
lon=bio.read_data_direct(basedir + expname + '/latlon/lon_d01z001.grd',nx,ny,1,'>f4')[:,:,0]
for ibv in range (inibv , endbv + 1):
bvstr="%04d" % ibv
print( ' Plotting bred vector number ' + bvstr )
while ( ctime <= etime ):
for iter in range ( iniit , endit + 1 ):
iterstr="%04d" % iter
ptime=ctime - delta #Data corresponding to the previous step (to compute bv growth)
print ( 'The date is :', ctime )
print ( 'Reading the positive perturbation original')
mydir=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pp_o' + iterstr + '/'
data_pp_o=bio.read_data_scale(mydir,expname,ctime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
print ( 'Reading the negative perturbation original')
mydir=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pn_o' + iterstr + '/'
data_pn_o=bio.read_data_scale(mydir,expname,ctime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
print ( 'Reading the positive perturbation rescaled')
mydir=basedir + expname + ptime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pp_r' + iterstr + '/'
data_pp_r=bio.read_data_scale(mydir,expname,ptime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
print ( 'Reading the negative perturbation rescaled')
mydir=basedir + expname + ptime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pn_r' + iterstr + '/'
data_pn_r=bio.read_data_scale(mydir,expname,ptime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
#mydir=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/'
#bv_pert_norm=bio.read_perturbation_norm_scale(mydir,expname,ctime,nx,ny,nz)
bv_o=bvf.data_diff( data_pp_o , data_pn_o )
bv_r=bvf.data_diff( data_pp_r , data_pn_r ) #Use the rescaled perturbations read above.
#Plot BV
mydir=plotbasedir + '/' + '/' + bvstr + '/' + iterstr + '/'
bvf.plot_bv(bv_o,data_pp_o,lon,lat,plotvars,plotlevels,mydir,date=ctime.strftime("%Y%m%d%H%M%S"))
ctime = ctime + delta
print ( "Finish time loop" )
| gpl-3.0 |
awacha/credolib | credolib/qualitycontrol.py | 1 | 10239 |
__all__ = ['assess_flux_stability', 'sum_measurement_times', 'assess_sample_stability',
'assess_instrumental_background', 'assess_transmission', 'assess_gc_fit', 'assess_fitting_results']
import ipy_table
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.display import display
from matplotlib.colors import LogNorm
from sastool.classes2 import Curve, Exposure
from sastool.misc.cormap import cormaptest
from sastool.misc.easylsq import nonlinear_leastsquares
from .io import load_exposure, load_mask
def assess_flux_stability(samplename='Glassy_Carbon'):
ip = get_ipython()
f = plt.figure()
ax1 = f.add_subplot(1, 1, 1)
plt.xlabel('Date of exposure')
plt.ylabel('Beam flux (photon/sec), continuous lines')
ax2 = plt.twinx()
plt.ylabel('Vacuum pressure (mbar), dotted lines')
plt.title('Beam flux stability')
samplenames = sorted([sn_ for sn_ in ip.user_ns['_headers_sample'] if samplename in sn_])
linestyles = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
lines = []
for sn, ls in zip(samplenames, linestyles):
print(sn)
heds = ip.user_ns['_headers_sample'][sn]
allheds = []
for k in heds.keys():
allheds.extend(heds[k])
allheds = sorted(allheds, key=lambda x: x.fsn)
flux = np.array([float(h.flux) for h in allheds])
dates = [h.date for h in allheds]
lines.extend(ax1.plot(dates, flux, ls + 'o', label='Flux (%s)' % sn))
vacuums = np.array([float(h.vacuum) for h in allheds])
lines.extend(ax2.plot(dates, vacuums, ls + 's', label='Vacuum (%s)' % sn, lw=2))
print(' Measurement duration: %.2f h' % ((dates[-1] - dates[0]).total_seconds() / 3600.))
print(' Mean flux: ', flux.mean(), '+/-', flux.std(), 'photons/sec')
print(' RMS variation of flux: ', flux.std() / flux.mean() * 100, '%')
print(' P-P variation of flux: ', flux.ptp() / flux.mean() * 100, '%')
ax1.legend(lines, [l.get_label() for l in lines], loc='best')
plt.show()
def sum_measurement_times():
ip = get_ipython()
headers = ip.user_ns['_headers_sample']
tab = [['Sample name', 'Distance (mm)', 'Measurement time (h)']]
for samplename in sorted(headers):
for dist in sorted(headers[samplename]):
tab.append([samplename, "%.2f" % dist,
'%.2f' % (sum([h.exposuretime for h in headers[samplename][dist]]) / 3600)])
tab = ipy_table.IpyTable(tab)
tab.apply_theme('basic')
display(tab)
def assess_sample_stability(end_cutoff=3):
ip = get_ipython()
rowavg = ip.user_ns['_rowavg']
tab = [['Sample name', 'Distance', 'Slope of autocorrelation function', 'Stability']]
plt.figure()
for sn in sorted(rowavg):
for dist in sorted(rowavg[sn]):
rowavg_rescaled = rowavg[sn][dist] / rowavg[sn][dist].mean()
rowavg_std = rowavg_rescaled.std() * np.ones_like(rowavg_rescaled)
try:
A, B, stat = nonlinear_leastsquares(np.arange(len(rowavg_rescaled)), rowavg_rescaled, rowavg_std,
lambda x, a, b: a * x + b, [0, 0])
problematic = (A.val > A.err * 3)
except TypeError:
A = 'N/A'
problematic = 2
tab.append([sn, '%.2f' % dist, A, ["\u2713", "\u2718\u2718\u2718\u2718\u2718", '\u274e'][problematic]])
plt.errorbar(np.arange(len(rowavg_rescaled)), rowavg_rescaled,
label=sn + ' %.2f mm' % dist) # ,diags_std[1:])
plt.xlabel('Separation in time (FSN units)')
plt.ylabel('Average discrepancy between curves')
plt.legend(loc='best')
tab = ipy_table.IpyTable(tab)
tab.apply_theme('basic')
display(tab)
plt.show()
def assess_instrumental_background(Wx=20, Wy=20, emptyname='Empty_Beam', maskname='mask.mat'):
ip = get_ipython()
for dist in ip.user_ns['_headers_sample'][emptyname]:
data = ip.user_ns['_data2d'][emptyname][dist]
assert isinstance(data, Exposure)
intensity = data.intensity / data.header.exposuretime
mask = load_mask(maskname).astype(bool)
m = np.zeros_like(intensity)
print(' Mean intensity per pixel:', intensity[mask].mean(), 'cps')
print(' STD intensity per pixel:', intensity[mask == 1].std(), 'cps')
print(' Total intensity:', intensity[mask == 1].sum(), 'cps')
for row in range(m.shape[0]):
for col in range(m.shape[1]):
m[row, col] = intensity[max(row - Wy, 0):min(row + Wy, m.shape[0] - 1),
max(col - Wx, 0):min(col + Wx, m.shape[1] - 1)][
mask[max(row - Wy, 0):min(row + Wy, m.shape[0] - 1),
max(col - Wx, 0):
min(col + Wx, m.shape[1] - 1)] == 1].mean()
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(intensity, norm=LogNorm())
plt.subplot(1, 2, 2)
plt.imshow(m)
plt.tight_layout()
plt.suptitle('Empty beam, {} mm'.format(dist))
def assess_transmission():
ip = get_ipython()
tab = [
['Sample name', 'Distance', 'Transmission', 'Linear absorption coefficient (1/cm)', 'Absorption length (cm)']]
for sample in sorted(ip.user_ns['_headers_sample']):
for dist in sorted(ip.user_ns['_headers_sample'][sample]):
transms_seen = []
for h in ip.user_ns['_headers_sample'][sample][dist]:
if float(h.transmission) not in transms_seen:
transms_seen.append(float(h.transmission))
transm = h.transmission
thickness = h.thickness
try:
mu = (-transm.log()) / thickness
except ZeroDivisionError:
mu = 'Infinite'
invmu = 0
else:
try:
invmu = (1 / mu).tostring(extra_digits=2)
except ZeroDivisionError:
invmu = 'Infinite'
mu = mu.tostring(extra_digits=2)
tab.append([sample, dist, transm.tostring(extra_digits=2), mu, invmu])
tab = ipy_table.IpyTable(tab)
tab.apply_theme('basic')
display(tab)
def assess_gc_fit(reffile=None, gcname='Glassy_Carbon'):
ip = get_ipython()
if reffile is None:
reffile = ip.user_ns['_loaders'][0].get_subpath('config/GC_data_nm.dat')
refcurve = Curve.new_from_file(reffile)
f = plt.figure()
f.add_subplot(1, 1, 1)
rads = {}
for fsn in sorted([h.fsn for h in ip.user_ns['_headers']['processed'] if h.title == gcname]):
try:
ex = load_exposure(fsn, raw=False, processed=True)
except Exception:
continue
rads[ex.header.fsn] = ex.radial_average(refcurve.q)
del ex
qmin = max([r.sanitize().q.min() for r in rads.values()])
qmax = min([r.sanitize().q.max() for r in rads.values()])
refcurve.trim(qmin, qmax).loglog('o', mfc='none', ms=10)
for r in sorted(rads):
rads[r].loglog('.', label='#{:d}'.format(r))
plt.axis('tight')
plt.legend(loc='best', numpoints=1)
plt.xlabel('q (nm$^{-1}$)')
plt.ylabel(r'$d\Sigma/d\Omega$ (cm$^{-1}$ sr$^{-1}$)')
plt.grid(True, which='both')
plt.draw()
def calc_chi2(y, dy, fittedy):
return (((y - fittedy) / dy) ** 2).sum() / (len(y) - 1)
def calc_R2(y, fittedy):
SStot = ((y - np.mean(y)) ** 2).sum()
SSres = ((fittedy - y) ** 2).sum()
return 1 - SSres / SStot
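# Usage sketch (illustrative, not part of the original module): for the
# four-column .fir arrays loaded in assess_fitting_results below
# (q, Iexp, Errexp, Ifitted),
#   calc_chi2(fir[:, 1], fir[:, 2], fir[:, 3])  # reduced chi-squared, ~1 for a good fit
#   calc_R2(fir[:, 1], fir[:, 3])               # coefficient of determination, ~1 is good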
def assess_fitting_results(basename, cormap_alpha=0.01):
"""Assess the results of a fit based on the .fit and .fir files created by
various programs from the ATSAS suite."""
plt.figure(figsize=(12, 4))
plt.subplot2grid((1, 4), (0, 0), colspan=2)
fir = np.loadtxt(basename + '.fir', skiprows=1) # q, Iexp, Errexp, Ifitted
# do a cormap test to compare the raw data and the model.
pvalf, Cf, cormapf = cormaptest(fir[:, 1], fir[:, 3])
cormapstatusf = ['Reject', 'Accept'][pvalf >= cormap_alpha]
plt.errorbar(fir[:, 0], fir[:, 1], fir[:, 2], None, 'bo-', label='Raw data')
plt.plot(fir[:, 0], fir[:, 3], 'r-', label='Fitted')
chi2 = calc_chi2(fir[:, 1], fir[:, 2], fir[:, 3])
R2 = calc_R2(fir[:, 1], fir[:, 3])
try:
skiprows = 0
while True:
try:
fit = np.loadtxt(basename + '.fit', skiprows=skiprows) # q, Ismoothed, Ifitted
break
except ValueError as ve:
if ve.args[0].startswith('could not convert string to float'):
skiprows += 1
continue
else:
raise
# do a cormap test to compare the raw data to the smoothed data
smoothed = fit[(fit[:, 0] >= fir[:, 0].min()) & (fit[:, 0] <= fir[:, 0].max()), 1]
pvals, Cs, cormaps = cormaptest(fir[:, 1], smoothed)
cormapstatuss = ['Reject', 'Accept'][pvals >= cormap_alpha]
plt.plot(fit[:, 0], fit[:, 1], 'g.-', label='Smoothed, extrapolated')
plt.plot(fit[:, 0], fit[:, 2], 'm-', label='Fitted to smoothed, extrapolated')
except ValueError as ve:
print('Error while loading file: {}.fit: {}'.format(basename, ve))
except FileNotFoundError:
fit = None
cormaps = cormapstatuss = pvals = Cs = None
plt.xscale('log')
plt.yscale('log')
plt.legend(loc='best')
plt.grid(which='both')
if fit is not None:
plt.subplot2grid((1, 4), (0, 2))
plt.imshow(cormaps, cmap='gray', interpolation='nearest')
plt.title('CorMap of the smoothing')
plt.subplot2grid((1, 4), (0, 3))
plt.imshow(cormapf, cmap='gray', interpolation='nearest')
plt.title('CorMap of the fitting')
print('R2: ', R2)
print('Chi2: ', chi2)
if fit is not None:
print('Cormap test of the smoothing: {} (p={}, C={}, N={})'.format(cormapstatuss, pvals, Cs, cormaps.shape[0]))
print('Cormap test of fit: {} (p={}, C={}, N={})'.format(cormapstatusf, pvalf, Cf, cormapf.shape[0]))
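# --- Hedged sketch (not part of the original module) ------------------------
# Small convenience loader for the four-column .fir files consumed above
# (q, I_exp, Err_exp, I_fitted); the function name and return layout are
# assumptions, not credolib API.
def load_fir(basename):
    """Return the (q, intensity, error, fitted) columns of an ATSAS .fir file."""
    fir = np.loadtxt(basename + '.fir', skiprows=1)
    return fir[:, 0], fir[:, 1], fir[:, 2], fir[:, 3]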
| bsd-3-clause |
HIPS/pgmult | experiments/ap_lds_indiv.py | 1 | 19816 |
"""
Linear dynamical system model for the AP text dataset.
Each document is modeled as a draw from an LDS with
categorical observations.
"""
import os
import re
import gzip
import time
import pickle
import operator
import collections
import numpy as np
from scipy.special import logsumexp
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
from hips.plotting.layout import create_axis_at_location, create_figure
import brewer2mpl
from pgmult.lds import MultinomialLDS
from pgmult.particle_lds import LogisticNormalMultinomialLDS, ParticleSBMultinomialLDS
from pgmult.hmm import MultinomialHMM
from pgmult.utils import pi_to_psi
from pylds.models import DefaultLDS, NonstationaryLDS
from pybasicbayes.distributions import GaussianFixed, Multinomial, Regression
from pybasicbayes.util.text import progprint_xrange
from autoregressive.distributions import AutoRegression
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
goodcolors = np.array([0,1,2,4,6,7,8])
colors = np.array(colors)[goodcolors]
np.seterr(invalid="warn")
np.random.seed(0)
# Model parameters
K = 200 # Number of words
# Data handling
def load(filename=None):
if filename is None:
bigstr = download_ap()
else:
with open(filename,'r') as infile:
bigstr = infile.read()
docs = re.findall(r'(?<=<TEXT> ).*?(?= </TEXT>)',bigstr.replace('\n',''))
vectorizer = CountVectorizer(stop_words='english',max_features=K).fit(docs)
docs = [make_onehot_seq(doc, vectorizer) for doc in docs]
# words = vectorizer.get_feature_names()
words = list(vectorizer.vocabulary_.keys())
# Sort by usage
# usage = np.array([doc.sum(0) for doc in docs]).sum(0)
# perm = np.argsort(usage)[::-1]
# docs = [doc[:,perm] for doc in docs]
# words = np.array(words)[perm]
words = np.array(words)
return docs, words
def download_ap():
from io import BytesIO
from urllib.request import urlopen
import tarfile
print("Downloading AP data...")
response = urlopen('http://www.cs.princeton.edu/~blei/lda-c/ap.tgz')
tar = tarfile.open(fileobj=BytesIO(response.read()))
return tar.extractfile('ap/ap.txt').read().decode('utf-8', errors='ignore')
def filter_wordseq(doc, vectorizer):
return [w for w in doc if w in vectorizer.vocabulary_]
def make_onehot_seq(doc, vectorizer):
lst = filter_wordseq(vectorizer.build_analyzer()(doc), vectorizer)
indices = {word:idx for idx, word in enumerate(vectorizer.vocabulary_.keys())}
out = np.zeros((len(lst),len(indices)))
for wordidx, word in enumerate(lst):
out[wordidx, indices[word]] = 1
return out
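# --- Hedged sketch (not part of the original script) ------------------------
# Inverse helper for quick inspection of a one-hot document, assuming the
# column order of the matrix matches the `words` array built in load().
# The function name is an assumption.
def onehot_to_words(X, words):
    """Map a (T, K) one-hot matrix back to the corresponding word strings."""
    return [words[k] for k in np.argmax(X, axis=1)]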
# Inference stuff
# model, lls, test_lls, pred_lls, pis, psis, zs, timestamps
Results = collections.namedtuple("Results", ["lls", "test_lls", "pred_lls", "samples", "timestamps"])
def fit_lds_model(Xs, Xtest, D, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
mus = [X.sum(0) + 0.1 for X in Xs]
mus = [mu/mu.sum() for mu in mus]
# mus = [np.ones(K)/float(K) for _ in Xs]
models = [MultinomialLDS(K, D,
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*np.eye(D),M_0=np.zeros((D,D)),K_0=1*np.eye(D)),
sigma_C=1., mu_pi=mus[i]) for i in range(Nx)]
for X, model in zip(Xs, models):
model.add_data(X)
[model.resample_parameters() for model in models]
def compute_pred_ll():
pred_ll = 0
for Xt, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xt, M=1)[0]
return pred_ll
init_results = (0, models, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
list(map(np.array, list(zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_hmm(Xs, Xtest, D_hmm, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting HMM with %d states" % D_hmm)
models = [MultinomialHMM(K, D_hmm, alpha_0=10.0) for _ in range(Nx)]
for X, model in zip(Xs, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xtr, Xte, model in zip(Xs, Xtest, models):
pred_ll += model.log_likelihood(np.vstack((Xtr, Xte))) - model.log_likelihood(Xtr)
return pred_ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
list(map(np.array, list(zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_gaussian_lds_model(Xs, Xtest, D_gauss_lds, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting Gaussian (Raw) LDS with %d states" % D_gauss_lds)
from pylds.models import NonstationaryLDS
D = D_gauss_lds  # alias the requested latent dimension for the definitions below
models = [NonstationaryLDS(
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*np.eye(D),M_0=np.zeros((D,D)),K_0=1*np.eye(D)),
emission_distn=Regression(nu_0=K+1,S_0=K*np.eye(K),M_0=np.zeros((K,D)),K_0=K*np.eye(D)))
for _ in range(Nx)]
Xs_centered = [X - np.mean(X, axis=0)[None,:] + 1e-3*np.random.randn(*X.shape) for X in Xs]
for X, model in zip(Xs_centered, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xtr, Xte, model in zip(Xs_centered, Xtest, models):
# Monte Carlo sample to get pi density implied by Gaussian LDS
Npred = 10
Tpred = Xte.shape[0]
preds = model.sample_predictions(Xtr, Tpred, Npred=Npred)
# Convert predictions to a distribution by finding the
# largest dimension for each predicted Gaussian.
# Preds is T x K x Npred, inds is TxNpred
inds = np.argmax(preds, axis=1)
pi = np.array([np.bincount(inds[t], minlength=K) for t in range(Tpred)]) / float(Npred)
assert np.allclose(pi.sum(axis=1), 1.0)
pi = np.clip(pi, 1e-8, 1.0)
pi /= pi.sum(axis=1)[:,None]
# Compute the log likelihood under pi
pred_ll += np.sum([Multinomial(weights=pi[t], K=K).log_likelihood(Xte[t][None,:])
for t in range(Tpred)])
return pred_ll
# TODO: Get initial pred ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
list(map(np.array, list(zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_ln_lds_model(Xs, Xtest, D, N_samples=100):
"""
Fit a logistic normal LDS model with pMCMC
"""
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting Logistic Normal LDS with %d states" % D)
mus = [X.sum(0) + 0.1 for X in Xs]
mus = [np.log(mu/mu.sum()) for mu in mus]
models = [LogisticNormalMultinomialLDS(
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=D*np.eye(D),M_0=np.zeros((D,D)),K_0=D*np.eye(D)),
emission_distn=Regression(nu_0=K+1,S_0=K*np.eye(K),M_0=np.zeros((K,D)),K_0=K*np.eye(D)),
sigma_C=1.0, mu=mu) \
for mu in mus]
for model in models:
model.A = 0.5*np.eye(D)
model.sigma_states = np.eye(D)
model.C = 1.0*np.random.randn(K,D)
model.sigma_obs = 0.1*np.eye(K)
for X, model in zip(Xs, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xte, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xte, Npred=1)[0]
return pred_ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
list(map(np.array, list(zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_lds_model_with_pmcmc(Xs, Xtest, D, N_samples=100):
"""
Fit a logistic normal LDS model with pMCMC
"""
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting SBM-LDS with %d states using pMCMC" % D)
models = [ParticleSBMultinomialLDS(
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=D*np.eye(D),M_0=np.zeros((D,D)),K_0=D*np.eye(D)),
emission_distn=Regression(nu_0=K+1,S_0=K*np.eye(K),M_0=np.zeros((K,D)),K_0=K*np.eye(D)),
mu=pi_to_psi(np.ones(K)/K),
sigma_C=1.0)
for _ in range(Nx)]
for model in models:
model.A = 0.5*np.eye(D)
model.sigma_states = np.eye(D)
model.C = np.random.randn(K-1,D)
model.sigma_obs = 0.1*np.eye(K)
for X, model in zip(Xs, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xte, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xte, Npred=100)[0]
return pred_ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
list(map(np.array, list(zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def plot_log_likelihood(results, names, results_dir, outname="pred_ll_vs_time.pdf"):
# Plot the log likelihood
plt.figure(figsize=(3,3.2))
for i,(result, name) in enumerate(zip(results, names)):
plt.plot(result.timestamps, result.lls, lw=2, color=colors[i], label=name)
# plt.plot(gauss_lds_lls, lw=2, color=colors[2], label="Gaussian LDS")
plt.legend(loc="lower right")
plt.xlabel('Time (s)')
plt.ylabel("Log Likelihood")
# plt.title("Chr22 DNA Model")
plt.savefig(os.path.join(results_dir, outname))
plt.tight_layout()
def plot_pred_log_likelihood(results, names, results_dir, outname="pred_ll_vs_time.pdf", smooth=True):
# Plot the log likelihood
plt.figure(figsize=(3,3.2))
for i,(result, name) in enumerate(zip(results, names)):
if result.pred_lls.ndim == 2:
pred_ll = result.pred_lls[:,0]
else:
pred_ll = result.pred_lls
# Smooth the log likelihood
if smooth:
win = 10
pad_pred_ll = np.concatenate((pred_ll[0] * np.ones(win), pred_ll))
smooth_pred_ll = np.array([logsumexp(pad_pred_ll[j-win:j+1])-np.log(win)
for j in range(win, pad_pred_ll.size)])
plt.plot(np.clip(result.timestamps, 1e-3,np.inf), smooth_pred_ll,
lw=2, color=colors[i], label=name)
else:
plt.plot(np.clip(result.timestamps, 1e-3,np.inf), result.pred_lls,
lw=2, color=colors[i], label=name)
# if result.pred_lls.ndim == 2:
# plt.errorbar(np.clip(result.timestamps, 1e-3,np.inf),
# result.pred_lls[:,0],
# yerr=result.pred_lls[:,1],
# lw=2, color=colors[i], label=name)
# else:
# plt.plot(np.clip(result.timestamps, 1e-3,np.inf), result.pred_lls, lw=2, color=colors[i], label=name)
# plt.plot(gauss_lds_lls, lw=2, color=colors[2], label="Gaussian LDS")
# plt.legend(loc="lower right")
plt.xlabel('Time (s)')
plt.xscale("log")
plt.ylabel("Pred. Log Likelihood")
# plt.ylim(-700, -500)
# plt.title("Chr22 DNA Model")
plt.savefig(os.path.join(results_dir, outname))
plt.tight_layout()
def plot_pred_ll_vs_D(all_results, Ds, Xtrain, Xtest,
results_dir, models=None):
# Create a big matrix of shape (len(Ds) x 5 x T) for the pred lls
N = len(Ds) # Number of dimensions tests
M = len(all_results[0]) # Number of models tested
T = len(all_results[0][0].pred_lls) # Number of MCMC iters
pred_lls = np.zeros((N,M,T))
for n in range(N):
for m in range(M):
if all_results[n][m].pred_lls.ndim == 2:
pred_lls[n,m] = all_results[n][m].pred_lls[:,0]
else:
pred_lls[n,m] = all_results[n][m].pred_lls
# Compute the mean and standard deviation on burned in samples
burnin = T // 2
pred_ll_mean = logsumexp(pred_lls[:,:,burnin:], axis=-1) - np.log(T-burnin)
# Use bootstrap to compute error bars
pred_ll_std = np.zeros_like(pred_ll_mean)
for n in range(N):
for m in range(M):
samples = np.random.choice(pred_lls[n,m,burnin:], size=(100, (T-burnin)), replace=True)
pll_samples = logsumexp(samples, axis=1) - np.log(T-burnin)
pred_ll_std[n,m] = pll_samples.std()
# Get the baseline pred ll
baseline = 0
normalizer = 0
for Xtr, Xte in zip(Xtrain, Xtest):
pi_emp = Xtr.sum(0) / float(Xtr.sum())
pi_emp = np.clip(pi_emp, 1e-8, np.inf)
pi_emp /= pi_emp.sum()
baseline += Multinomial(weights=pi_emp, K=Xtr.shape[1]).log_likelihood(Xte).sum()
normalizer += Xte.sum()
# Make a bar chart with errorbars
from hips.plotting.layout import create_figure
fig = create_figure(figsize=(1.25,2.5), transparent=True)
fig.set_tight_layout(True)
ax = fig.add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
width = np.min(np.diff(Ds)) / (M+1.0) if len(Ds)>1 else 1.
for m in range(M):
ax.bar(Ds+m*width,
(pred_ll_mean[:,m] - baseline) / normalizer,
yerr=pred_ll_std[:,m] / normalizer,
width=0.9*width, color=colors[m], ecolor='k')
#
# ax.text(Ds+(m-1)*width, yloc, rankStr, horizontalalignment=align,
# verticalalignment='center', color=clr, weight='bold')
# Plot the zero line
ax.plot([Ds.min()-width, Ds.max()+(M+1)*width], np.zeros(2), '-k')
# Set the tick labels
ax.set_xlim(Ds.min()-width, Ds.max()+(M+1)*width)
# ax.set_xticks(Ds + (M*width)/2.)
# ax.set_xticklabels(Ds)
# ax.set_xticks(Ds + width * np.arange(M) + width/2. )
# ax.set_xticklabels(models, rotation=45)
ax.set_xticks([])
# ax.set_xlabel("D")
ax.set_ylabel("Pred. Log Lkhd. (nats/word)")
ax.set_title("AP News")
plt.savefig(os.path.join(results_dir, "pred_ll_vs_D.pdf"))
def compute_singular_vectors(model, words):
# Compute the left and right singular vectors of the model's
# dynamics matrix, A, then project these through C to get the
# corresponding vector psi, which can be transformed into a
# vector of word probabilities, pi, and sorted.
A, C, mu = model.A, model.C, model.emission_distn.mu
U,S,V = np.linalg.svd(A)
def top_k(k, pi):
# Get the top k words ranked by pi
perm = np.argsort(pi)[::-1]
return words[perm][:k]
for d in range(min(5, A.shape[0])):
ud = U[:,d]
vd = V[d,:]
psi_ud = C.dot(ud) + mu
psi_vd = C.dot(vd) + mu
from pgmult.internals.utils import psi_to_pi
baseline = psi_to_pi(mu)
pi_ud = psi_to_pi(psi_ud) - baseline
pi_vd = psi_to_pi(psi_vd) - baseline
# pi_ud = C.dot(ud)
# pi_vd = C.dot(vd)
print("")
print("Singular vector ", d, " Singular value, ", S[d])
print("Right: ")
print(top_k(5, pi_vd))
print("Left: ")
print(top_k(5, pi_ud))
if __name__ == "__main__":
run = 3
results_dir = os.path.join("results", "ap_indiv", "run%03d" % run)
# Make sure the results directory exists
from pgmult.internals.utils import mkdir
if not os.path.exists(results_dir):
print("Making results directory: ", results_dir)
mkdir(results_dir)
# Load the AP news documents
Xs, words = load()
# N_docs = 1
docs = slice(0,20)
T_split = 10
# Filter out documents shorter than 2 * T_split
Xfilt = [X for X in Xs if X.shape[0] > 5*T_split]
Xtrain = [X[:-T_split] for X in Xfilt[docs]]
Xtest = [X[-T_split:] for X in Xfilt[docs]]
# Perform inference for a range of latent state dimensions and models
N_samples = 200
all_results = []
Ds = np.array([10])
models = ["SBM-LDS", "HMM", "Raw LDS" , "LNM-LDS"]
methods = [fit_lds_model, fit_hmm, fit_gaussian_lds_model, fit_ln_lds_model]
# models = ["SBM-LDS", "HMM", "LNM-LDS"]
# methods = [fit_lds_model, fit_hmm, fit_ln_lds_model]
for D in Ds:
D_results = []
for model, method in zip(models, methods):
results_file = os.path.join(results_dir, "results_%s_D%d.pkl.gz" % (model, D))
if os.path.exists(results_file):
print("Loading from: ", results_file)
with gzip.open(results_file, "r") as f:
D_model_results = pickle.load(f)
else:
print("Fitting ", model, " for D=",D)
D_model_results = method(Xtrain, Xtest, D, N_samples)
with gzip.open(results_file, "w") as f:
print("Saving to: ", results_file)
pickle.dump(D_model_results, f, protocol=-1)
D_results.append(D_model_results)
all_results.append(D_results)
# Plot log likelihoods for the results using one D
res_index = 0
# plot_log_likelihood(all_results[res_index],
# models,
# results_dir,
# outname="train_ll_vs_time_D%d.pdf" % Ds[res_index])
#
plot_pred_log_likelihood(all_results[res_index],
models,
results_dir,
outname="pred_ll_vs_time_D%d.pdf" % Ds[res_index])
# Make a bar chart of all the results
plot_pred_ll_vs_D(all_results, Ds, Xtrain, Xtest, results_dir, models)
plt.show()
# Compute the singular vectors
print("Doc 0")
print(np.array(words)[np.where(Xfilt[docs][0])[1]])
compute_singular_vectors(all_results[0][0].samples[0][0], np.array(words))
| mit |
diegoaurino/numerical_python | calculating_pi_with_darts/calculating_pi_with_darts/calculating_pi_with_darts.py | 1 | 2137 |
#!/usr/bin/env python
"""
Project: calculating_pi_with_darts
Description: A small script that estimates the area of a circle of radius r
inscribed in a square board with side 2r using n-darts randomly
thrown at this board (Monte Carlo methods).
The output gives the estimation of both the area of the circle and pi.
Author: Diego Aurino da Silva
Author URI: http://diegoaurino.info/
Default repository: https://github.com/diegoaurino/numerical_python
Version: NA
License: MIT
License URI: https://github.com/diegoaurino/numerical_python/blob/master/LICENSE
Copyright © 2017 Diego Aurino
"""
import sys, random, sympy, numpy
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def calculating_pi_with_darts(radius, number_of_darts):
axis_x = numpy.random.uniform(0, 2 * radius, number_of_darts)
axis_y = numpy.random.uniform(0, 2 * radius, number_of_darts)
# plotting
figure_plot = plt.figure()
axis_plot = figure_plot.add_subplot(111, aspect='equal')
axis_plot.scatter(axis_x, axis_y, color='b', marker='.', alpha=0.1)
axis_plot.add_patch(patches.Circle((radius, radius), radius, color='g', fill=False))
axis_plot.add_patch(patches.Rectangle((0, 0), 2 * radius, 2 * radius, color='r', fill=False))
plt.show()
# Estimation
fraction = len(
numpy.where(
(numpy.hypot(axis_x - radius, axis_y - radius)) < radius)[0]) / number_of_darts
print(
"\nInputted radius: {} mm".format(radius),
"\nNumber of darts: {}".format(number_of_darts),
"\nReal area: {} mm\u00B2".format(round(numpy.pi * (radius ** 2), 1)),
"\n", "--"*10,
"\nExt. area: {} mm\u00B2".format(round(4 * (radius ** 2 ) * fraction, 1)),
"\n"*2,
"\nReal \u03C0: {}".format(numpy.pi),
"\n", "--"*10,
"\nExt. \u03C0: {}".format(4 * fraction)
)
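# --- Hedged sketch (not part of the original script) ------------------------
# A plotting-free variant of the same Monte Carlo estimate, useful for quick
# convergence checks: pi ~= 4 * (darts inside the circle / total darts).
# The function name and default radius are assumptions.
def estimate_pi(number_of_darts, radius=1.0):
    x = numpy.random.uniform(0, 2 * radius, number_of_darts)
    y = numpy.random.uniform(0, 2 * radius, number_of_darts)
    inside = numpy.hypot(x - radius, y - radius) < radius
    return 4 * inside.mean()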
if __name__ == "__main__":
sys.exit(int(calculating_pi_with_darts(int(input('Please, insert the radius in mm: ')),
int(input('Please, insert the number of darts: '))) or 0))
| mit |
gfyoung/pandas | pandas/io/pytables.py | 1 | 168169 |
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
from __future__ import annotations
from contextlib import suppress
import copy
from datetime import date, tzinfo
import itertools
import os
import re
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Hashable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
from pandas._config import config, get_option
from pandas._libs import lib, writers as libwriters
from pandas._libs.tslibs import timezones
from pandas._typing import ArrayLike, DtypeArg, FrameOrSeries, FrameOrSeriesUnion, Shape
from pandas.compat._optional import import_optional_dependency
from pandas.compat.pickle_compat import patch_pickle
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_list_like,
is_string_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
concat,
isna,
)
from pandas.core.arrays import Categorical, DatetimeArray, PeriodArray
import pandas.core.common as com
from pandas.core.computation.pytables import PyTablesExpr, maybe_expression
from pandas.core.construction import extract_array
from pandas.core.indexes.api import ensure_index
from pandas.core.internals import BlockManager
from pandas.io.common import stringify_path
from pandas.io.formats.printing import adjoin, pprint_thing
if TYPE_CHECKING:
from tables import Col, File, Node
from pandas.core.internals import Block
# versioning attribute
_version = "0.15.2"
# encoding
_default_encoding = "UTF-8"
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode("UTF-8")
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""
Ensure that an index / column name is a str (python 3); otherwise they
may be np.string dtype. Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, str):
name = str(name)
return name
Term = PyTablesExpr
def _ensure_term(where, scope_level: int):
"""
Ensure that the where is a Term or a list of Term.
This makes sure that we are capturing the scope of variables that are
passed; create the terms here with a frame_level=2 (we are 2 levels down)
"""
# only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
where = [
Term(term, scope_level=level + 1) if maybe_expression(term) else term
for term in where
if term is not None
]
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where if where is None or len(where) else None
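# Illustrative sketch (not part of the original module): a `where` condition is
# typically passed as a string and converted to Term objects here, e.g.
#   store.select("df", where="index > 5")             # query a table-format frame
#   store.select("df", where="columns=['A', 'B']")    # restrict the returned columns
# Only table-format stores support where-based selection.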
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}
# axes map
_AXES_MAP = {DataFrame: [0]}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format for writing; if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix("io.hdf"):
config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
config.register_option(
"default_format",
None,
format_doc,
validator=config.is_one_of_factory(["fixed", "table", None]),
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
with suppress(AttributeError):
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == "strict"
)
return _table_mod
# interface to/from ###
def to_hdf(
path_or_buf,
key: str,
value: FrameOrSeries,
mode: str = "a",
complevel: Optional[int] = None,
complib: Optional[str] = None,
append: bool = False,
format: Optional[str] = None,
index: bool = True,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
dropna: Optional[bool] = None,
data_columns: Optional[Union[bool, List[str]]] = None,
errors: str = "strict",
encoding: str = "UTF-8",
):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(
key,
value,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
dropna=dropna,
data_columns=data_columns,
errors=errors,
encoding=encoding,
)
else:
# NB: dropna is not passed to `put`
f = lambda store: store.put(
key,
value,
format=format,
index=index,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
errors=errors,
encoding=encoding,
dropna=dropna,
)
path_or_buf = stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
with HDFStore(
path_or_buf, mode=mode, complevel=complevel, complib=complib
) as store:
f(store)
else:
f(path_or_buf)
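# Usage sketch (illustrative, not part of the pandas source): this function backs
# DataFrame.to_hdf / Series.to_hdf, e.g.
#   df.to_hdf("store.h5", "data", format="table", append=True)
#   df2 = pd.read_hdf("store.h5", "data")
# With append=True the call is routed to HDFStore.append, otherwise to HDFStore.put.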
def read_hdf(
path_or_buf,
key=None,
mode: str = "r",
errors: str = "strict",
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
columns=None,
iterator=False,
chunksize: Optional[int] = None,
**kwargs,
):
"""
Read from the store, close it if we opened it.
Retrieve pandas object stored in file, optionally based on where
criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path_or_buf : str, path object, pandas.HDFStore or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.h5``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
key : object, optional
The group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
mode : {'r', 'r+', 'a'}, default 'r'
Mode to use when opening the file. Ignored if path_or_buf is a
:class:`pandas.HDFStore`. Default is 'r'.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
where : list, optional
A list of Term (or convertible) objects.
start : int, optional
Row number to start selection.
stop : int, optional
Row number to stop selection.
columns : list, optional
A list of columns names to return.
iterator : bool, optional
Return an iterator object.
chunksize : int, optional
Number of rows to include in an iteration when using an iterator.
**kwargs
Additional keyword arguments passed to HDFStore.
Returns
-------
item : object
The selected object. Return type depends on the object stored.
See Also
--------
DataFrame.to_hdf : Write a HDF file from a DataFrame.
HDFStore : Low-level access to HDF files.
Examples
--------
>>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])
>>> df.to_hdf('./store.h5', 'data')
>>> reread = pd.read_hdf('./store.h5')
"""
if mode not in ["r", "r+", "a"]:
raise ValueError(
f"mode {mode} is not allowed while performing a read. "
f"Allowed modes are r, r+ and a."
)
# grab the scope
if where is not None:
where = _ensure_term(where, scope_level=1)
if isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise OSError("The HDFStore must be open for reading.")
store = path_or_buf
auto_close = False
else:
path_or_buf = stringify_path(path_or_buf)
if not isinstance(path_or_buf, str):
raise NotImplementedError(
"Support for generic buffers has not been implemented."
)
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise FileNotFoundError(f"File {path_or_buf} does not exist")
store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError(
"Dataset(s) incompatible with Pandas data types, "
"not table, or no datasets found in HDF5 file."
)
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError(
"key must be provided when HDF5 "
"file contains multiple datasets."
)
key = candidate_only_group._v_pathname
return store.select(
key,
where=where,
start=start,
stop=stop,
columns=columns,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
except (ValueError, TypeError, KeyError):
if not isinstance(path_or_buf, HDFStore):
# if there is an error, close the store if we opened it.
with suppress(AttributeError):
store.close()
raise
def _is_metadata_of(group: Node, parent_group: Node) -> bool:
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == "meta":
return True
current = current._v_parent
return False
class HDFStore:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
>>> store['foo'] = bar
>>> store.close() # only now, data is written to disk
"""
_handle: Optional[File]
_mode: str
_complevel: int
_fletcher32: bool
def __init__(
self,
path,
mode: str = "a",
complevel: Optional[int] = None,
complib=None,
fletcher32: bool = False,
**kwargs,
):
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self):
return self._path
@property
def root(self):
""" return the root node """
self._check_if_open()
assert self._handle is not None # for mypy
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key: str):
return self.get(key)
def __setitem__(self, key: str, value):
self.put(key, value)
def __delitem__(self, key: str):
return self.remove(key)
def __getattr__(self, name: str):
""" allow attribute access to get stores """
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathname w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self) -> int:
return len(self.groups())
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self, include: str = "pandas") -> List[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When include equals 'pandas' return pandas objects.
When include equals 'native' return native HDF5 Table objects.
.. versionadded:: 1.1.0
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if include has an illegal value
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode: str = "a", **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
**kwargs
These parameters will be passed to the PyTables open_file method.
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ["a", "w"] and mode in ["r", "r+"]:
pass
elif mode in ["w"]:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
f"Re-opening the file [{self._path}] with mode [{self._mode}] "
"will delete the current file!"
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complevel and self._complevel > 0:
self._filters = _tables().Filters(
self._complevel, self._complib, fletcher32=self._fletcher32
)
if _table_file_open_policy_is_strict and self.is_open:
msg = (
"Cannot open HDF5 file, which is already opened, "
"even in read-only mode."
)
raise ValueError(msg)
self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self) -> bool:
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync: bool = False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
with suppress(OSError):
os.fsync(self._handle.fileno())
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
def select(
self,
key: str,
where=None,
start=None,
stop=None,
columns=None,
iterator=False,
chunksize=None,
auto_close: bool = False,
):
"""
Retrieve pandas object stored in file, optionally based on where criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
Object being retrieved from file.
where : list or None
List of Term (or convertible) objects, optional.
start : int or None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list or None
A list of columns that if not None, will limit the return columns.
iterator : bool or False
Returns an iterator.
chunksize : int or None
Number of rows to include in iteration, return an iterator.
auto_close : bool or False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop, where=_where, columns=columns)
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=s.nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result()
def select_as_coordinates(
self,
key: str,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(
self,
key: str,
column: str,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
def select_as_multiple(
self,
keys,
where=None,
selector=None,
columns=None,
start=None,
stop=None,
iterator=False,
chunksize=None,
auto_close: bool = False,
):
"""
Retrieve pandas objects from multiple tables.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : bool, default False
Should automatically close the store when finished.
Raises
------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, str):
return self.select(
key=keys,
where=where,
columns=columns,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError(f"Invalid table [{k}]")
if not t.is_table:
raise TypeError(
f"object [{t.pathname}] is not a table, and cannot be used in all "
"select as multiple"
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError("all tables must have exactly the same nrows!")
# The isinstance checks here are redundant with the check above,
# but necessary for mypy; see GH#29757
_tbls = [x for x in tbls if isinstance(x, Table)]
# axis is the concatenation axis
axis = list({t.non_index_axes[0][0] for t in _tbls})[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [
t.read(where=_where, columns=columns, start=_start, stop=_stop)
for t in tbls
]
# concat and return
return concat(objs, axis=axis, verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(
self,
s,
func,
where=where,
nrows=nrows,
start=start,
stop=stop,
iterator=iterator,
chunksize=chunksize,
auto_close=auto_close,
)
return it.get_result(coordinates=True)
def put(
self,
key: str,
value: FrameOrSeries,
format=None,
index=True,
append=False,
complib=None,
complevel: Optional[int] = None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
data_columns: Optional[List[str]] = None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
dropna: bool = False,
):
"""
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
Format to use when storing object in HDFStore. Value can be one of:
``'fixed'``
Fixed format. Fast writing/reading. Not-appendable, nor searchable.
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list, default None
List of columns to create as data columns, or True to use all columns.
See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables to have the same h5 files (same hashes)
independent on creation time.
.. versionadded:: 1.1.0
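Examples
--------
A minimal sketch of storing a frame; the file name and data are illustrative.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.0]})
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.put("data", df, format="table", data_columns=["A"])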
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
encoding=encoding,
errors=errors,
track_times=track_times,
dropna=dropna,
)
def remove(self, key: str, where=None, start=None, stop=None):
"""
Remove a pandas object, or a subset of its rows, by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
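Examples
--------
A minimal sketch; assumes a node 'data' previously written in table format.
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.remove("data", where="index > 2")  # delete only matching rows
...     store.remove("data")  # delete the whole node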
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as err:
# In tests we get here with ClosedFileError, TypeError, and
# _table_mod.NoSuchNodeError. TODO: Catch only these?
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!"
) from err
# we are actually trying to remove a node (with children)
node = self.get_node(key)
if node is not None:
node._f_remove(recursive=True)
return None
# remove the node
if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
"can only remove with where on objects written as tables"
)
return s.delete(where=where, start=start, stop=stop)
def append(
self,
key: str,
value: FrameOrSeries,
format=None,
axes=None,
index=True,
append=True,
complib=None,
complevel: Optional[int] = None,
columns=None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
nan_rep=None,
chunksize=None,
expectedrows=None,
dropna: Optional[bool] = None,
data_columns: Optional[List[str]] = None,
encoding=None,
errors: str = "strict",
):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'table' is the default
Format to use when storing object in HDFStore. Value can be one of:
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
append : bool, default True
Append the input data to the existing table.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum str sizes
nan_rep : str to use as str nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
dropna : bool, default False
If True, do not write rows that are entirely NaN. The default is
settable by the option 'io.hdf.dropna_table'.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
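Examples
--------
A minimal sketch; the file name and frames are illustrative.
>>> df1 = pd.DataFrame({"A": [1, 2]})
>>> df2 = pd.DataFrame({"A": [3, 4]})
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.append("data", df1, data_columns=["A"])
...     store.append("data", df2)
...     store.select("data", where="A > 2")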
"""
if columns is not None:
raise TypeError(
"columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or "table"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
axes=axes,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
def append_to_multiple(
self,
d: Dict,
value,
selector,
data_columns=None,
axes=None,
dropna=False,
**kwargs,
):
"""
Append to multiple tables
Parameters
----------
d : a dict mapping table_name to table_columns; None is acceptable as
the value of at most one entry (that table will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designated as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : bool, default False. If True, drop a row from all tables whenever
that row is entirely NaN in any one of the tables, keeping rows synchronized.
Notes
-----
axes parameter is currently not accepted
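Examples
--------
A minimal sketch of splitting one frame across two tables; all names are
illustrative.
>>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.append_to_multiple(
...         {"sel_tbl": ["a", "b"], "rest_tbl": None}, df, selector="sel_tbl"
...     )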
"""
if axes is not None:
raise TypeError(
"axes is currently not accepted as a parameter to append_to_multiple; "
"you can create the tables independently instead"
)
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values: List = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how="all").index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
min_itemsize = kwargs.pop("min_itemsize", None)
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex(v, axis=axis)
filtered = (
{key: value for (key, value) in min_itemsize.items() if key in v}
if min_itemsize is not None
else None
)
self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
self,
key: str,
columns=None,
optlevel: Optional[int] = None,
kind: Optional[str] = None,
):
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
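Examples
--------
A minimal sketch; assumes 'data' is written in table format with an
indexable data column.
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.append("data", pd.DataFrame({"A": [1, 2]}), index=False, data_columns=["A"])
...     store.create_table_index("data", columns=["A"], optlevel=9, kind="full")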
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
def groups(self):
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
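Examples
--------
A minimal sketch; the file name is illustrative.
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.put("data", pd.DataFrame({"A": [1]}))
...     store.groups()  # list of top-level PyTables group nodes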
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
def walk(self, where="/"):
"""
Walk the pytables group hierarchy for pandas objects.
This generator will yield the group path, subgroups and pandas object
names for each group.
Any non-pandas PyTables objects that are not a group will be ignored.
The `where` group itself is listed first (preorder), then each of its
child groups (following an alphanumerical order) is also traversed,
following the same procedure.
.. versionadded:: 0.24.0
Parameters
----------
where : str, default "/"
Group where to start walking.
Yields
------
path : str
Full path to a group (without trailing '/').
groups : list
Names (strings) of the groups contained in `path`.
leaves : list
Names (strings) of the pandas objects contained in `path`.
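Examples
--------
A minimal sketch; the group layout is illustrative.
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.put("grp/data1", pd.DataFrame({"A": [1]}))
...     for path, subgroups, leaves in store.walk():
...         print(path, subgroups, leaves)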
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
for g in self._handle.walk_groups(where):
if getattr(g._v_attrs, "pandas_type", None) is not None:
continue
groups = []
leaves = []
for child in g._v_children.values():
pandas_type = getattr(child._v_attrs, "pandas_type", None)
if pandas_type is None:
if isinstance(child, _table_mod.group.Group):
groups.append(child._v_name)
else:
leaves.append(child._v_name)
yield (g._v_pathname.rstrip("/"), groups, leaves)
def get_node(self, key: str) -> Optional[Node]:
""" return the node with the key or None if it does not exist """
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
def get_storer(self, key: str) -> Union[GenericFixed, Table]:
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
def copy(
self,
file,
mode="w",
propindexes: bool = True,
keys=None,
complib=None,
complevel: Optional[int] = None,
fletcher32: bool = False,
overwrite=True,
):
"""
Copy the existing store to a new file.
Parameters
----------
propindexes : bool, default True
Restore indexes in copied file.
keys : list, optional
List of keys to include in the copy (defaults to all).
overwrite : bool, default True
Whether to overwrite (remove and replace) existing nodes in the new store.
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
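Examples
--------
A minimal sketch; both file names are illustrative. Note that the returned
store is open and should be closed by the caller.
>>> with pd.HDFStore("src.h5") as src:  # doctest: +SKIP
...     new_store = src.copy("dst.h5", propindexes=True)
...     new_store.close()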
"""
new_store = HDFStore(
file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if isinstance(s, Table):
index: Union[bool, List[str]] = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k,
data,
index=index,
data_columns=getattr(s, "data_columns", None),
encoding=s.encoding,
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
def info(self) -> str:
"""
Print detailed information on the store.
Returns
-------
str
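Examples
--------
A minimal sketch; the file name is illustrative.
>>> with pd.HDFStore("store.h5") as store:  # doctest: +SKIP
...     store.put("data", pd.DataFrame({"A": [1, 2]}))
...     print(store.info())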
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
""" validate / deprecate formats """
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError as err:
raise TypeError(f"invalid HDFStore format specified [{format}]") from err
return format
def _create_storer(
self,
group,
format=None,
value: Optional[FrameOrSeries] = None,
encoding: str = "UTF-8",
errors: str = "strict",
) -> Union[GenericFixed, Table]:
""" return a suitable class to operate """
cls: Union[Type[GenericFixed], Type[Table]]
if value is not None and not isinstance(value, (Series, DataFrame)):
raise TypeError("value must be None, Series, or DataFrame")
def error(t):
# return instead of raising so mypy can tell where we are raising
return TypeError(
f"cannot properly create the storer for: [{t}] [group->"
f"{group},value->{type(value)},format->{format}"
)
pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
assert _table_mod is not None # for mypy
if getattr(group, "table", None) or isinstance(
group, _table_mod.table.Table
):
pt = "frame_table"
tt = "generic_table"
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed"
)
else:
if isinstance(value, Series):
pt = "series"
else:
pt = "frame"
# we are actually a table
if format == "table":
pt += "_table"
# a storer node
if "table" not in pt:
_STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
try:
cls = _STORER_MAP[pt]
except KeyError as err:
raise error("_STORER_MAP") from err
return cls(self, group, encoding=encoding, errors=errors)
# existing node (and must be a table)
if tt is None:
# if we are a writer, determine the tt
if value is not None:
if pt == "series_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_series"
elif index.nlevels > 1:
tt = "appendable_multiseries"
elif pt == "frame_table":
index = getattr(value, "index", None)
if index is not None:
if index.nlevels == 1:
tt = "appendable_frame"
elif index.nlevels > 1:
tt = "appendable_multiframe"
_TABLE_MAP = {
"generic_table": GenericTable,
"appendable_series": AppendableSeriesTable,
"appendable_multiseries": AppendableMultiSeriesTable,
"appendable_frame": AppendableFrameTable,
"appendable_multiframe": AppendableMultiFrameTable,
"worm": WORMTable,
}
try:
cls = _TABLE_MAP[tt]
except KeyError as err:
raise error("_TABLE_MAP") from err
return cls(self, group, encoding=encoding, errors=errors)
def _write_to_group(
self,
key: str,
value: FrameOrSeries,
format,
axes=None,
index=True,
append=False,
complib=None,
complevel: Optional[int] = None,
fletcher32=None,
min_itemsize: Optional[Union[int, Dict[str, int]]] = None,
chunksize=None,
expectedrows=None,
dropna=False,
nan_rep=None,
data_columns=None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
):
# we don't want to store a table node at all if our object is 0-len
# as there are no dtypes
if getattr(value, "empty", None) and (format == "table" or append):
return
group = self._identify_group(key, append)
s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
raise ValueError("Can only append to Tables")
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError("Compression not supported on Fixed format stores")
# write the object
s.write(
obj=value,
axes=axes,
append=append,
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
min_itemsize=min_itemsize,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
nan_rep=nan_rep,
data_columns=data_columns,
track_times=track_times,
)
if isinstance(s, Table) and index:
s.create_index(columns=index)
def _read_group(self, group: Node):
s = self._create_storer(group)
s.infer_axes()
return s.read()
def _identify_group(self, key: str, append: bool) -> Node:
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
def _create_nodes_and_group(self, key: str) -> Node:
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
class TableIterator:
"""
Define the iteration interface on a table
Parameters
----------
store : HDFStore
s : the referred storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : bool, default False
Whether to use the default iterator.
chunksize : the passed chunking value (default is 100000)
auto_close : bool, default False
Whether to automatically close the store at the end of iteration.
"""
chunksize: Optional[int]
store: HDFStore
s: Union[GenericFixed, Table]
def __init__(
self,
store: HDFStore,
s: Union[GenericFixed, Table],
func,
where,
nrows,
start=None,
stop=None,
iterator: bool = False,
chunksize: Optional[int] = None,
auto_close: bool = False,
):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
if self.coordinates is None:
raise ValueError("Cannot iterate until get_result is called.")
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates: bool = False):
# return the actual iterator
if self.chunksize is not None:
if not isinstance(self.s, Table):
raise TypeError("can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
# if specified, read via coordinates (necessary for multiple selections)
if coordinates:
if not isinstance(self.s, Table):
raise TypeError("can only read_coordinates on a table")
where = self.s.read_coordinates(
where=self.where, start=self.start, stop=self.stop
)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
class IndexCol:
"""
an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ["freq", "tz", "index_name"]
name: str
cname: str
def __init__(
self,
name: str,
values=None,
kind=None,
typ=None,
cname: Optional[str] = None,
axis=None,
pos=None,
freq=None,
tz=None,
index_name=None,
ordered=None,
table=None,
meta=None,
metadata=None,
):
if not isinstance(name, str):
raise ValueError("`name` must be a str.")
self.values = values
self.kind = kind
self.typ = typ
self.name = name
self.cname = cname or name
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.ordered = ordered
self.table = table
self.meta = meta
self.metadata = metadata
if pos is not None:
self.set_pos(pos)
# These are ensured as long as the passed arguments match the
# constructor annotations.
assert isinstance(self.name, str)
assert isinstance(self.cname, str)
@property
def itemsize(self) -> int:
# Assumes self.typ has already been initialized
return self.typ.itemsize
@property
def kind_attr(self) -> str:
return f"{self.name}_kind"
def set_pos(self, pos: int):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
def __repr__(self) -> str:
temp = tuple(
map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
)
return ",".join(
(
f"{key}->{value}"
for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp)
)
)
def __eq__(self, other: Any) -> bool:
""" compare 2 col items """
return all(
getattr(self, a, None) == getattr(other, a, None)
for a in ["name", "cname", "axis", "pos"]
)
def __ne__(self, other) -> bool:
return not self.__eq__(other)
@property
def is_indexed(self) -> bool:
""" return whether I am an indexed column """
if not hasattr(self.table, "cols"):
# e.g. if infer hasn't been called yet, self.table will be None.
return False
return getattr(self.table.cols, self.cname).is_indexed
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Convert the data from this selection to the appropriate pandas type.
"""
assert isinstance(values, np.ndarray), type(values)
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
val_kind = _ensure_decoded(self.kind)
values = _maybe_convert(values, val_kind, encoding, errors)
kwargs = {}
kwargs["name"] = _ensure_decoded(self.index_name)
if self.freq is not None:
kwargs["freq"] = _ensure_decoded(self.freq)
factory: Union[Type[Index], Type[DatetimeIndex]] = Index
if is_datetime64_dtype(values.dtype) or is_datetime64tz_dtype(values.dtype):
factory = DatetimeIndex
# making an Index instance could throw a number of different errors
try:
new_pd_index = factory(values, **kwargs)
except ValueError:
# if the output freq is different than what we recorded,
# it should be None (see also 'doc example part 2')
if "freq" in kwargs:
kwargs["freq"] = None
new_pd_index = factory(values, **kwargs)
new_pd_index = _set_tz(new_pd_index, self.tz)
return new_pd_index, new_pd_index
def take_data(self):
""" return the values"""
return self.values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None):
"""
maybe set a string col itemsize:
min_itemsize can be an integer or a dict keyed by this column's name
with an integer size
"""
if _ensure_decoded(self.kind) == "string":
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)
def validate_names(self):
pass
def validate_and_set(self, handler: AppendableTable, append: bool):
self.table = handler.table
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == "string":
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
f"Trying to store a string with len [{itemsize}] in "
f"[{self.cname}] column but\nthis column has a limit of "
f"[{c.itemsize}]!\nConsider using min_itemsize to "
"preset the sizes on these columns"
)
return c.itemsize
return None
def validate_attr(self, append: bool):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError(
f"incompatible kind in col [{existing_kind} - {self.kind}]"
)
def update_info(self, info):
"""
set/update the info for this indexable with the key/value;
if there is a conflict, raise/warn as needed
"""
for key in self._info_fields:
value = getattr(self, key, None)
idx = info.setdefault(self.name, {})
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ["freq", "index_name"]:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
f"invalid info for [{self.name}] for [{key}], "
f"existing_value [{existing_value}] conflicts with "
f"new value [{value}]"
)
else:
if value is not None or existing_value is not None:
idx[key] = value
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def set_attr(self):
""" set the kind for this column """
setattr(self.attrs, self.kind_attr, self.kind)
def validate_metadata(self, handler: AppendableTable):
""" validate that kind=category does not change the categories """
if self.meta == "category":
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if (
new_metadata is not None
and cur_metadata is not None
and not array_equivalent(new_metadata, cur_metadata)
):
raise ValueError(
"cannot append a categorical with "
"different categories to the existing"
)
def write_metadata(self, handler: AppendableTable):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self) -> bool:
return False
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Convert the data from this selection to the appropriate pandas type.
Parameters
----------
values : np.ndarray
nan_rep : str
encoding : str
errors : str
"""
assert isinstance(values, np.ndarray), type(values)
values = Int64Index(np.arange(len(values)))
return values, values
def set_attr(self):
pass
class DataCol(IndexCol):
"""
a data-holding column; by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ["tz", "ordered"]
def __init__(
self,
name: str,
values=None,
kind=None,
typ=None,
cname=None,
pos=None,
tz=None,
ordered=None,
table=None,
meta=None,
metadata=None,
dtype: Optional[DtypeArg] = None,
data=None,
):
super().__init__(
name=name,
values=values,
kind=kind,
typ=typ,
pos=pos,
cname=cname,
tz=tz,
ordered=ordered,
table=table,
meta=meta,
metadata=metadata,
)
self.dtype = dtype
self.data = data
@property
def dtype_attr(self) -> str:
return f"{self.name}_dtype"
@property
def meta_attr(self) -> str:
return f"{self.name}_meta"
def __repr__(self) -> str:
temp = tuple(
map(
pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape)
)
)
return ",".join(
(
f"{key}->{value}"
for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp)
)
)
def __eq__(self, other: Any) -> bool:
""" compare 2 col items """
return all(
getattr(self, a, None) == getattr(other, a, None)
for a in ["name", "cname", "dtype", "pos"]
)
def set_data(self, data: ArrayLike):
assert data is not None
assert self.dtype is None
data, dtype_name = _get_data_and_dtype_name(data)
self.data = data
self.dtype = dtype_name
self.kind = _dtype_to_kind(dtype_name)
def take_data(self):
""" return the data """
return self.data
@classmethod
def _get_atom(cls, values: ArrayLike) -> Col:
"""
Get an appropriately typed and shaped pytables.Col object for values.
"""
dtype = values.dtype
# error: "ExtensionDtype" has no attribute "itemsize"
itemsize = dtype.itemsize # type: ignore[attr-defined]
shape = values.shape
if values.ndim == 1:
# EA, use block shape pretending it is 2D
# TODO(EA2D): not necessary with 2D EAs
shape = (1, values.size)
if isinstance(values, Categorical):
codes = values.codes
atom = cls.get_atom_data(shape, kind=codes.dtype.name)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
atom = cls.get_atom_datetime64(shape)
elif is_timedelta64_dtype(dtype):
atom = cls.get_atom_timedelta64(shape)
elif is_complex_dtype(dtype):
atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
elif is_string_dtype(dtype):
atom = cls.get_atom_string(shape, itemsize)
else:
atom = cls.get_atom_data(shape, kind=dtype.name)
return atom
@classmethod
def get_atom_string(cls, shape, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=shape[0])
@classmethod
def get_atom_coltype(cls, kind: str) -> Type[Col]:
""" return the PyTables column class for this column """
if kind.startswith("uint"):
k4 = kind[4:]
col_name = f"UInt{k4}Col"
elif kind.startswith("period"):
# we store as integer
col_name = "Int64Col"
else:
kcap = kind.capitalize()
col_name = f"{kcap}Col"
return getattr(_tables(), col_name)
@classmethod
def get_atom_data(cls, shape, kind: str) -> Col:
return cls.get_atom_coltype(kind=kind)(shape=shape[0])
@classmethod
def get_atom_datetime64(cls, shape):
return _tables().Int64Col(shape=shape[0])
@classmethod
def get_atom_timedelta64(cls, shape):
return _tables().Int64Col(shape=shape[0])
@property
def shape(self):
return getattr(self.data, "shape", None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if existing_fields is not None and existing_fields != list(self.values):
raise ValueError("appended items do not match existing items in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if existing_dtype is not None and existing_dtype != self.dtype:
raise ValueError(
"appended items dtype do not match existing items dtype in table!"
)
def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
"""
Convert the data from this selection to the appropriate pandas type.
Parameters
----------
values : np.ndarray
nan_rep :
encoding : str
errors : str
Returns
-------
index : listlike to become an Index
data : ndarraylike to become a column
"""
assert isinstance(values, np.ndarray), type(values)
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
assert self.typ is not None
if self.dtype is None:
# Note: in tests we never have timedelta64 or datetime64,
# so the _get_data_and_dtype_name may be unnecessary
converted, dtype_name = _get_data_and_dtype_name(values)
kind = _dtype_to_kind(dtype_name)
else:
converted = values
dtype_name = self.dtype
kind = self.kind
assert isinstance(converted, np.ndarray) # for mypy
# use the meta if needed
meta = _ensure_decoded(self.meta)
metadata = self.metadata
ordered = self.ordered
tz = self.tz
assert dtype_name is not None
# convert to the correct dtype
dtype = _ensure_decoded(dtype_name)
# reverse converts
if dtype == "datetime64":
# recreate with tz if indicated
converted = _set_tz(converted, tz, coerce=True)
elif dtype == "timedelta64":
converted = np.asarray(converted, dtype="m8[ns]")
elif dtype == "date":
try:
converted = np.asarray(
[date.fromordinal(v) for v in converted], dtype=object
)
except ValueError:
converted = np.asarray(
[date.fromtimestamp(v) for v in converted], dtype=object
)
elif meta == "category":
# we have a categorical
categories = metadata
codes = converted.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
if categories is None:
# Handle case of NaN-only categorical columns in which case
# the categories are an empty array; when this is stored,
# pytables cannot write a zero-len array, so on readback
# the categories would be None and `read_hdf()` would fail.
categories = Index([], dtype=np.float64)
else:
mask = isna(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum()._values
converted = Categorical.from_codes(
codes, categories=categories, ordered=ordered
)
else:
try:
converted = converted.astype(dtype, copy=False)
except TypeError:
converted = converted.astype("O", copy=False)
# convert nans / decode
if _ensure_decoded(kind) == "string":
converted = _unconvert_string_array(
converted, nan_rep=nan_rep, encoding=encoding, errors=errors
)
return self.values, converted
def set_attr(self):
""" set the data for this column """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
assert self.dtype is not None
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
@classmethod
def get_atom_string(cls, shape, itemsize):
return _tables().StringCol(itemsize=itemsize)
@classmethod
def get_atom_data(cls, shape, kind: str) -> Col:
return cls.get_atom_coltype(kind=kind)()
@classmethod
def get_atom_datetime64(cls, shape):
return _tables().Int64Col()
@classmethod
def get_atom_timedelta64(cls, shape):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
pass
class Fixed:
"""
represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : HDFStore
group : Node
The group node where the table resides.
"""
pandas_kind: str
format_type: str = "fixed" # GH#30962 needed by dask
obj_type: Type[FrameOrSeriesUnion]
ndim: int
encoding: str
parent: HDFStore
group: Node
errors: str
is_table = False
def __init__(
self,
parent: HDFStore,
group: Node,
encoding: str = "UTF-8",
errors: str = "strict",
):
assert isinstance(parent, HDFStore), type(parent)
assert _table_mod is not None # needed for mypy
assert isinstance(group, _table_mod.Node), type(group)
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.errors = errors
@property
def is_old_version(self) -> bool:
return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1
@property
def version(self) -> Tuple[int, int, int]:
""" compute and set our version """
version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None))
try:
version = tuple(int(x) for x in version.split("."))
if len(version) == 2:
version = version + (0,)
except AttributeError:
version = (0, 0, 0)
return version
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None))
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
jshape = ",".join(pprint_thing(x) for x in s)
s = f"[{jshape}]"
return f"{self.pandas_type:12.12} (shape->{s})"
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self) -> int:
return self.parent._complevel
@property
def _fletcher32(self) -> bool:
return self.parent._fletcher32
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self) -> bool:
return False
@property
def nrows(self):
return getattr(self.storable, "nrows", None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
"""
infer the axes of my storer
return a boolean indicating if we have a valid storer or not
"""
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement"
)
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: subclasses should implement"
)
def delete(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if com.all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
_reverse_index_map = {v: k for k, v in _index_type_map.items()}
attributes: List[str] = []
# indexer helpers
def _class_to_alias(self, cls) -> str:
return self._index_type_map.get(cls, "")
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, attrs):
index_class = self._alias_to_class(
_ensure_decoded(getattr(attrs, "index_class", ""))
)
factory: Callable
if index_class == DatetimeIndex:
def f(values, freq=None, tz=None):
# data are already in UTC, localize and convert if tz present
dta = DatetimeArray._simple_new(values.values, freq=freq)
result = DatetimeIndex._simple_new(dta, name=None)
if tz is not None:
result = result.tz_localize("UTC").tz_convert(tz)
return result
factory = f
elif index_class == PeriodIndex:
def f(values, freq=None, tz=None):
parr = PeriodArray._simple_new(values, freq=freq)
return PeriodIndex._simple_new(parr, name=None)
factory = f
else:
factory = index_class
kwargs = {}
if "freq" in attrs:
kwargs["freq"] = attrs["freq"]
if index_class is Index:
# DTI/PI would be gotten by _alias_to_class
factory = TimedeltaIndex
if "tz" in attrs:
if isinstance(attrs["tz"], bytes):
# created by python2
kwargs["tz"] = attrs["tz"].decode("utf-8")
else:
# created by python3
kwargs["tz"] = attrs["tz"]
assert index_class is DatetimeIndex # just checking
return factory, kwargs
def validate_read(self, columns, where):
"""
raise if any keywords are passed which are not-None
"""
if columns is not None:
raise TypeError(
"cannot pass a column specification when reading "
"a Fixed format store. this store must be selected in its entirety"
)
if where is not None:
raise TypeError(
"cannot pass a where specification when reading "
"from a Fixed format store. this store must be selected in its entirety"
)
@property
def is_exists(self) -> bool:
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, "transposed", False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = _ensure_decoded(getattr(attrs, "value_type", None))
shape = getattr(attrs, "shape", None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == "datetime64":
# reconstruct a timezone if indicated
tz = getattr(attrs, "tz", None)
ret = _set_tz(ret, tz, coerce=True)
elif dtype == "timedelta64":
ret = np.asarray(ret, dtype="m8[ns]")
if transposed:
return ret.T
else:
return ret
def read_index(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety"))
if variety == "multi":
return self.read_multi_index(key, start=start, stop=stop)
elif variety == "regular":
node = getattr(self.group, key)
index = self.read_index_node(node, start=start, stop=stop)
return index
else: # pragma: no cover
raise TypeError(f"unrecognized index variety: {variety}")
def write_index(self, key: str, index: Index):
if isinstance(index, MultiIndex):
setattr(self.attrs, f"{key}_variety", "multi")
self.write_multi_index(key, index)
else:
setattr(self.attrs, f"{key}_variety", "regular")
converted = _convert_index("index", index, self.encoding, self.errors)
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
node._v_attrs.freq = index.freq
if isinstance(index, DatetimeIndex) and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_multi_index(self, key: str, index: MultiIndex):
setattr(self.attrs, f"{key}_nlevels", index.nlevels)
for i, (lev, level_codes, name) in enumerate(
zip(index.levels, index.codes, index.names)
):
# write the level
if is_extension_array_dtype(lev):
raise NotImplementedError(
"Saving a MultiIndex with an extension dtype is not supported."
)
level_key = f"{key}_level{i}"
conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, f"{key}_name{name}", name)
# write the labels
label_key = f"{key}_label{i}"
self.write_array(label_key, level_codes)
def read_multi_index(
self, key: str, start: Optional[int] = None, stop: Optional[int] = None
) -> MultiIndex:
nlevels = getattr(self.attrs, f"{key}_nlevels")
levels = []
codes = []
names: List[Hashable] = []
for i in range(nlevels):
level_key = f"{key}_level{i}"
node = getattr(self.group, level_key)
lev = self.read_index_node(node, start=start, stop=stop)
levels.append(lev)
names.append(lev.name)
label_key = f"{key}_label{i}"
level_codes = self.read_array(label_key, start=start, stop=stop)
codes.append(level_codes)
return MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=True
)
def read_index_node(
self, node: Node, start: Optional[int] = None, stop: Optional[int] = None
) -> Index:
data = node[start:stop]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we replace it with the original.
if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type)
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if "name" in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
name = _ensure_decoded(name)
attrs = node._v_attrs
factory, kwargs = self._get_index_factory(attrs)
if kind == "date":
index = factory(
_unconvert_index(
data, kind, encoding=self.encoding, errors=self.errors
),
dtype=object,
**kwargs,
)
else:
index = factory(
_unconvert_index(
data, kind, encoding=self.encoding, errors=self.errors
),
**kwargs,
)
index.name = name
return index
def write_array_empty(self, key: str, value: ArrayLike):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
node = getattr(self.group, key)
node._v_attrs.value_type = str(value.dtype)
node._v_attrs.shape = value.shape
def write_array(self, key: str, obj: FrameOrSeries, items: Optional[Index] = None):
# TODO: we only have a few tests that get here, the only EA
# that gets passed is DatetimeArray, and we never have
# both self._filters and EA
value = extract_array(obj, extract_numpy=True)
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = value.size == 0
transposed = False
if is_categorical_dtype(value.dtype):
raise NotImplementedError(
"Cannot store a category dtype in a HDF5 dataset that uses format="
'"fixed". Use format="table".'
)
if not empty_array:
if hasattr(value, "T"):
# ExtensionArrays (1d) may not have transpose.
value = value.T
transposed = True
atom = None
if self._filters is not None:
with suppress(ValueError):
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
if atom is not None:
# We only get here if self._filters is non-None and
# the Atom.from_dtype call succeeded
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(
self.group, key, atom, value.shape, filters=self._filters
)
ca[:] = value
else:
self.write_array_empty(key, value)
elif value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value, skipna=False)
if empty_array:
pass
elif inferred_type == "string":
pass
else:
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
vlarr.append(value)
elif is_datetime64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view("i8"))
getattr(self.group, key)._v_attrs.value_type = "datetime64"
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key, value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = "datetime64"
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view("i8"))
getattr(self.group, key)._v_attrs.value_type = "timedelta64"
elif empty_array:
self.write_array_empty(key, value)
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
class SeriesFixed(GenericFixed):
pandas_kind = "series"
attributes = ["name"]
name: Hashable
@property
def shape(self):
try:
return (len(self.group.values),)
except (TypeError, AttributeError):
return None
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
self.validate_read(columns, where)
index = self.read_index("index", start=start, stop=stop)
values = self.read_array("values", start=start, stop=stop)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super().write(obj, **kwargs)
self.write_index("index", obj.index)
self.write_array("values", obj)
self.attrs.name = obj.name
class BlockManagerFixed(GenericFixed):
attributes = ["ndim", "nblocks"]
nblocks: int
@property
def shape(self) -> Optional[Shape]:
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, f"block{i}_items")
shape = getattr(node, "shape", None)
if shape is not None:
items += shape[0]
# data shape
node = self.group.block0_values
shape = getattr(node, "shape", None)
if shape is not None:
shape = list(shape[0 : (ndim - 1)])
else:
shape = []
shape.append(items)
return shape
except AttributeError:
return None
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
# start, stop applied to rows, so 0th axis only
self.validate_read(columns, where)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index(f"axis{i}", start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
dfs = []
for i in range(self.nblocks):
blk_items = self.read_index(f"block{i}_items")
values = self.read_array(f"block{i}_values", start=_start, stop=_stop)
columns = items[items.get_indexer(blk_items)]
df = DataFrame(values.T, columns=columns, index=axes[1])
dfs.append(df)
if len(dfs) > 0:
out = concat(dfs, axis=1)
out = out.reindex(columns=items, copy=False)
return out
return DataFrame(columns=axes[0], index=axes[1])
def write(self, obj, **kwargs):
super().write(obj, **kwargs)
data = obj._mgr
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0 and (not ax.is_unique):
raise ValueError("Columns index has to be unique for fixed format")
self.write_index(f"axis{i}", ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array(f"block{i}_values", blk.values, items=blk_items)
self.write_index(f"block{i}_items", blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = "frame"
obj_type = DataFrame
class Table(Fixed):
"""
represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = "wide_table"
format_type: str = "table" # GH#30962 needed by dask
table_type: str
levels: Union[int, List[Hashable]] = 1
is_table = True
index_axes: List[IndexCol]
non_index_axes: List[Tuple[int, Any]]
values_axes: List[DataCol]
data_columns: List
metadata: List
info: Dict
def __init__(
self,
parent: HDFStore,
group: Node,
encoding=None,
errors: str = "strict",
index_axes=None,
non_index_axes=None,
values_axes=None,
data_columns=None,
info=None,
nan_rep=None,
):
super().__init__(parent, group, encoding=encoding, errors=errors)
self.index_axes = index_axes or []
self.non_index_axes = non_index_axes or []
self.values_axes = values_axes or []
self.data_columns = data_columns or []
self.info = info or {}
self.nan_rep = nan_rep
@property
def table_type_short(self) -> str:
return self.table_type.split("_")[0]
def __repr__(self) -> str:
""" return a pretty representation of myself """
self.infer_axes()
jdc = ",".join(self.data_columns) if len(self.data_columns) else ""
dc = f",dc->[{jdc}]"
ver = ""
if self.is_old_version:
jver = ".".join(str(x) for x in self.version)
ver = f"[{jver}]"
jindex_axes = ",".join(a.name for a in self.index_axes)
return (
f"{self.pandas_type:12.12}{ver} "
f"(typ->{self.table_type_short},nrows->{self.nrows},"
f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
)
def __getitem__(self, c: str):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError(
"incompatible table_type with existing "
f"[{other.table_type} - {self.table_type}]"
)
for c in ["index_axes", "non_index_axes", "values_axes"]:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
f"invalid combination of [{c}] on appending data "
f"[{sax}] vs current table [{oax}]"
)
# should never get here
raise Exception(
f"invalid combination of [{c}] on appending data [{sv}] vs "
f"current table [{ov}]"
)
@property
def is_multi_index(self) -> bool:
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_multiindex(
self, obj: FrameOrSeriesUnion
) -> Tuple[DataFrame, List[Hashable]]:
"""
validate that we can store the multi-index; reset and return the
new object
"""
levels = [
l if l is not None else f"level_{i}" for i, l in enumerate(obj.index.names)
]
try:
reset_obj = obj.reset_index()
except ValueError as err:
raise ValueError(
"duplicate names/columns in the multi-index when storing as a table"
) from err
assert isinstance(reset_obj, DataFrame) # for mypy
return reset_obj, levels
@property
def nrows_expected(self) -> int:
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self) -> bool:
""" has this table been created """
return "table" in self.group
@property
def storable(self):
return getattr(self.group, "table", None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self) -> int:
""" the number of total columns in the values axes """
return sum(len(a.values) for a in self.values_axes)
@property
def is_transposed(self) -> bool:
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(
itertools.chain(
[int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes],
)
)
def queryables(self) -> Dict[str, Any]:
""" return a dict of the kinds allowable columns for this object """
# mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here
axis_names = {0: "index", 1: "columns"}
# compute the values_axes queryables
d1 = [(a.cname, a) for a in self.index_axes]
d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]
d3 = [
(v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
]
# error: Unsupported operand types for + ("List[Tuple[str, IndexCol]]"
# and "List[Tuple[str, None]]")
return dict(d1 + d2 + d3) # type: ignore[operator]
def index_cols(self):
""" return a list of my index cols """
# Note: each `i.cname` below is assured to be a str.
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self) -> List[str]:
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key: str) -> str:
""" return the metadata pathname for this key """
group = self.group._v_pathname
return f"{group}/meta/{key}/meta"
def write_metadata(self, key: str, values: np.ndarray):
"""
Write out a metadata array to the key as a fixed-format Series.
Parameters
----------
key : str
values : ndarray
"""
values = Series(values)
self.parent.put(
self._get_metadata_path(key),
values,
format="table",
encoding=self.encoding,
errors=self.errors,
nan_rep=self.nan_rep,
)
def read_metadata(self, key: str):
""" return the meta data array for this key """
if getattr(getattr(self.group, "meta", None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors
self.attrs.levels = self.levels
self.attrs.info = self.info
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or []
self.data_columns = getattr(self.attrs, "data_columns", None) or []
self.info = getattr(self.attrs, "info", None) or {}
self.nan_rep = getattr(self.attrs, "nan_rep", None)
self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
self.levels: List[Hashable] = getattr(self.attrs, "levels", None) or []
self.index_axes = [a for a in self.indexables if a.is_an_indexable]
self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1:
ws = incompatibility_doc % ".".join([str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""
validate that min_itemsize does not contain items that are not in the
axes; this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == "values":
continue
if k not in q:
raise ValueError(
f"min_itemsize has the key [{k}] which is not an axis or "
"data_column"
)
@cache_readonly
def indexables(self):
""" create/cache the indexables if they don't exist """
_indexables = []
desc = self.description
table_attrs = self.table.attrs
# Note: each of the `name` kwargs below are str, ensured
# by the definition in index_cols.
# index columns
for i, (axis, name) in enumerate(self.attrs.index_cols):
atom = getattr(desc, name)
md = self.read_metadata(name)
meta = "category" if md is not None else None
kind_attr = f"{name}_kind"
kind = getattr(table_attrs, kind_attr, None)
index_col = IndexCol(
name=name,
axis=axis,
pos=i,
kind=kind,
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
_indexables.append(index_col)
# values columns
dc = set(self.data_columns)
base_pos = len(_indexables)
def f(i, c):
assert isinstance(c, str)
klass = DataCol
if c in dc:
klass = DataIndexableCol
atom = getattr(desc, c)
adj_name = _maybe_adjust_name(c, self.version)
# TODO: why kind_attr here?
values = getattr(table_attrs, f"{adj_name}_kind", None)
dtype = getattr(table_attrs, f"{adj_name}_dtype", None)
kind = _dtype_to_kind(dtype)
md = self.read_metadata(c)
# TODO: figure out why these two versions of `meta` don't always match.
# meta = "category" if md is not None else None
meta = getattr(table_attrs, f"{adj_name}_meta", None)
obj = klass(
name=adj_name,
cname=c,
values=values,
kind=kind,
pos=base_pos + i,
typ=atom,
table=self.table,
meta=meta,
metadata=md,
dtype=dtype,
)
return obj
# Note: the definition of `values_cols` ensures that each
# `c` below is a str.
_indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return _indexables
def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
"""
Create a pytables index on the specified columns.
Parameters
----------
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError if trying to create an index on a complex-type column.
Notes
-----
Cannot index Time64Col or ComplexCol.
Pytables must be >= 3.0.
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = {}
if optlevel is not None:
kw["optlevel"] = optlevel
if kind is not None:
kw["kind"] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw["kind"] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw["optlevel"] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith("complex"):
raise TypeError(
"Columns containing complex values can be stored but "
"cannot be indexed when using table format. Either use "
"fixed format, set index=False, or do not include "
"the columns containing complex values to "
"data_columns when initializing the table."
)
v.create_index(**kw)
elif c in self.non_index_axes[0][1]:
# GH 28156
raise AttributeError(
f"column {c} is not a data_column.\n"
f"In order to read column {c} you must reload the dataframe \n"
f"into HDFStore and include {c} with the data_columns argument."
)
def _read_axes(
self, where, start: Optional[int] = None, stop: Optional[int] = None
) -> List[Tuple[ArrayLike, ArrayLike]]:
"""
Create the axes sniffed from the table.
Parameters
----------
where : ???
start : int or None, default None
stop : int or None, default None
Returns
-------
List[Tuple[index_values, column_values]]
"""
# create the selection
selection = Selection(self, where=where, start=start, stop=stop)
values = selection.select()
results = []
# convert the data
for a in self.axes:
a.set_info(self.info)
res = a.convert(
values,
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
)
results.append(res)
return results
@classmethod
def get_object(cls, obj, transposed: bool):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):
"""
take the input data_columns and min_itemsize and create a data
columns spec
"""
if not len(non_index_axes):
return []
axis, axis_labels = non_index_axes[0]
info = self.info.get(axis, {})
if info.get("type") == "MultiIndex" and data_columns:
raise ValueError(
f"cannot use a multi-index on axis [{axis}] with "
f"data_columns {data_columns}"
)
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns = list(data_columns) # ensure we do not modify
data_columns.extend(
[
k
for k in min_itemsize.keys()
if k != "values" and k not in existing_data_columns
]
)
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
def _create_axes(
self,
axes,
obj: DataFrame,
validate: bool = True,
nan_rep=None,
data_columns=None,
min_itemsize=None,
):
"""
Create and return the axes.
Parameters
----------
axes: list or None
The names or numbers of the axes to create.
obj : DataFrame
The object to create axes on.
validate: bool, default True
Whether to validate the obj against an existing object already written.
nan_rep :
A value to use for string column nan_rep.
data_columns : List[str], True, or None, default None
Specify the columns that we want to create to allow indexing on.
* True : Use all available columns.
* None : Use no columns.
* List[str] : Use the specified columns.
min_itemsize: Dict[str, int] or None, default None
The min itemsize for a column in bytes.
"""
if not isinstance(obj, DataFrame):
group = self.group._v_name
raise TypeError(
f"cannot properly create the storer for: [group->{group},"
f"value->{type(obj)}]"
)
# set the default axes if needed
if axes is None:
axes = [0]
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
table_exists = True
axes = [a.axis for a in self.index_axes]
data_columns = list(self.data_columns)
nan_rep = self.nan_rep
# TODO: do we always have validate=True here?
else:
table_exists = False
new_info = self.info
assert self.ndim == 2 # with next check, we must have len(axes) == 1
# we currently only support indexing on ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable"
)
# create according to the new data
new_non_index_axes: List = []
# nan_representation
if nan_rep is None:
nan_rep = "nan"
# We construct the non-index-axis first, since that alters new_info
idx = [x for x in [0, 1] if x not in axes][0]
a = obj.axes[idx]
# we might be able to change the axes on the appending data if necessary
append_axis = list(a)
if table_exists:
indexer = len(new_non_index_axes) # i.e. 0
exist_axis = self.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis), np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(
np.array(sorted(append_axis)), np.array(sorted(exist_axis))
):
append_axis = exist_axis
# the non_index_axes info
info = new_info.setdefault(idx, {})
info["names"] = list(a.names)
info["type"] = type(a).__name__
new_non_index_axes.append((idx, append_axis))
# Now we can construct our new index axis
idx = axes[0]
a = obj.axes[idx]
axis_name = obj._get_axis_name(idx)
new_index = _convert_index(axis_name, a, self.encoding, self.errors)
new_index.axis = idx
# Because we are always 2D, there is only one new_index, so
# we know it will have pos=0
new_index.set_pos(0)
new_index.update_info(new_info)
new_index.maybe_set_size(min_itemsize) # check for column conflicts
new_index_axes = [new_index]
j = len(new_index_axes) # i.e. 1
assert j == 1
# reindex by our non_index_axes & compute data_columns
assert len(new_non_index_axes) == 1
for a in new_non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
transposed = new_index.axis == 1
# figure out data_columns and get out blocks
data_columns = self.validate_data_columns(
data_columns, min_itemsize, new_non_index_axes
)
frame = self.get_object(obj, transposed)._consolidate()
blocks, blk_items = self._get_blocks_and_items(
frame, table_exists, new_non_index_axes, self.values_axes, data_columns
)
# add my values
vaxes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# the shape of the data column is determined by the indexable axes
klass = DataCol
name = None
# we have a data_column
if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
klass = DataIndexableCol
name = b_items[0]
if not (name is None or isinstance(name, str)):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
# make sure that we match up the existing columns
# if we have an existing table
existing_col: Optional[DataCol]
if table_exists and validate:
try:
existing_col = self.values_axes[i]
except (IndexError, KeyError) as err:
raise ValueError(
f"Incompatible appended table [{blocks}]"
f"with existing table [{self.values_axes}]"
) from err
else:
existing_col = None
new_name = name or f"values_block_{i}"
data_converted = _maybe_convert_for_string_atom(
new_name,
b,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
errors=self.errors,
block_columns=b_items,
)
adj_name = _maybe_adjust_name(new_name, self.version)
typ = klass._get_atom(data_converted)
kind = _dtype_to_kind(data_converted.dtype.name)
tz = _get_tz(data_converted.tz) if hasattr(data_converted, "tz") else None
meta = metadata = ordered = None
if is_categorical_dtype(data_converted.dtype):
ordered = data_converted.ordered
meta = "category"
metadata = np.array(data_converted.categories, copy=False).ravel()
data, dtype_name = _get_data_and_dtype_name(data_converted)
col = klass(
name=adj_name,
cname=new_name,
values=list(b_items),
typ=typ,
pos=j,
kind=kind,
tz=tz,
ordered=ordered,
meta=meta,
metadata=metadata,
dtype=dtype_name,
data=data,
)
col.update_info(new_info)
vaxes.append(col)
j += 1
dcs = [col.name for col in vaxes if col.is_data_indexable]
new_table = type(self)(
parent=self.parent,
group=self.group,
encoding=self.encoding,
errors=self.errors,
index_axes=new_index_axes,
non_index_axes=new_non_index_axes,
values_axes=vaxes,
data_columns=dcs,
info=new_info,
nan_rep=nan_rep,
)
if hasattr(self, "levels"):
# TODO: get this into constructor, only for appropriate subclass
new_table.levels = self.levels
new_table.validate_min_itemsize(min_itemsize)
if validate and table_exists:
new_table.validate(self)
return new_table
@staticmethod
def _get_blocks_and_items(
frame: DataFrame,
table_exists: bool,
new_non_index_axes,
values_axes,
data_columns,
):
# Helper to clarify non-state-altering parts of _create_axes
def get_blk_items(mgr):
return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
mgr = frame._mgr
mgr = cast(BlockManager, mgr)
blocks: List[Block] = list(mgr.blocks)
blk_items: List[Index] = get_blk_items(mgr)
if len(data_columns):
axis, axis_labels = new_non_index_axes[0]
new_labels = Index(axis_labels).difference(Index(data_columns))
mgr = frame.reindex(new_labels, axis=axis)._mgr
blocks = list(mgr.blocks) # type: ignore[union-attr]
blk_items = get_blk_items(mgr)
for c in data_columns:
mgr = frame.reindex([c], axis=axis)._mgr
blocks.extend(mgr.blocks) # type: ignore[union-attr]
blk_items.extend(get_blk_items(mgr))
# reorder the blocks in the same order as the existing table if we can
if table_exists:
by_items = {
tuple(b_items.tolist()): (b, b_items)
for b, b_items in zip(blocks, blk_items)
}
new_blocks: List["Block"] = []
new_blk_items = []
for ea in values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except (IndexError, KeyError) as err:
jitems = ",".join(pprint_thing(item) for item in items)
raise ValueError(
f"cannot match existing table structure for [{jitems}] "
"on appending data"
) from err
blocks = new_blocks
blk_items = new_blk_items
return blocks, blk_items
def process_axes(self, obj, selection: Selection, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
assert isinstance(self.levels, list) # assured by is_multi_index
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if selection.filter is not None:
for field, op, filt in selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_ORDERS:
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
assert axis_number is not None
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc(axis=axis_number)[takers]
# this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc(axis=axis_number)[takers]
raise ValueError(f"cannot find the field [{field}] for filtering!")
obj = process_filter(field, filt)
return obj
def create_description(
self,
complib,
complevel: Optional[int],
fletcher32: bool,
expectedrows: Optional[int],
) -> Dict[str, Any]:
""" create the description of the table from the axes & values """
# use the provided expectedrows if it is passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = {"name": "table", "expectedrows": expectedrows}
# description from the axes & values
d["description"] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel,
complib=complib,
fletcher32=fletcher32 or self._fletcher32,
)
d["filters"] = filters
elif self._filters is not None:
d["filters"] = self._filters
return d
def read_coordinates(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
"""
select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
selection = Selection(self, where=where, start=start, stop=stop)
coords = selection.select_coords()
if selection.filter is not None:
for field, op, filt in selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1
)
coords = coords[op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
def read_column(
self,
column: str,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
f"column [{column}] can not be extracted individually; "
"it is not data indexable"
)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
col_values = a.convert(
c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
)
return Series(_set_tz(col_values[1], a.tz), name=column)
raise KeyError(f"column [{column}] not found in the table")
class WORMTable(Table):
"""
a write-once read-many table: this format DOES NOT ALLOW appending to a
table. Writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk.
"""
table_type = "worm"
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
"""
read the indices and the indexing array, calculate offset rows and return
"""
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
"""
write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray); then create an indexing table so that we can search
"""
raise NotImplementedError("WORMTable needs to implement write")
class AppendableTable(Table):
""" support the new appendable table formats """
table_type = "appendable"
def write(
self,
obj,
axes=None,
append=False,
complib=None,
complevel=None,
fletcher32=None,
min_itemsize=None,
chunksize=None,
expectedrows=None,
dropna=False,
nan_rep=None,
data_columns=None,
track_times=True,
):
if not append and self.is_exists:
self._handle.remove_node(self.group, "table")
# create the axes
table = self._create_axes(
axes=axes,
obj=obj,
validate=append,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
)
for a in table.axes:
a.validate_names()
if not table.is_exists:
# create the table
options = table.create_description(
complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows,
)
# set the table attributes
table.set_attrs()
options["track_times"] = track_times
# create the table
table._handle.create_table(table.group, **options)
# update my info
table.attrs.info = table.info
# validate the axes and set the kinds
for a in table.axes:
a.validate_and_set(table, append)
# add the rows
table.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize: Optional[int], dropna: bool = False):
"""
we form the data into a 2-d structure including indexes, values and mask, and write it chunk-by-chunk
"""
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype("u1", copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
assert nindexes == 1, nindexes # ensures we don't need to broadcast
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = nrows // chunksize + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in indexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues],
)
def write_data_chunk(
self,
rows: np.ndarray,
indexes: List[np.ndarray],
mask: Optional[np.ndarray],
values: List[np.ndarray],
):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
if len(rows):
self.table.append(rows)
self.table.flush()
def delete(
self, where=None, start: Optional[int] = None, stop: Optional[int] = None
):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
selection = Selection(self, where, start=start, stop=stop)
values = selection.select_coords()
# delete the rows in reverse order
sorted_series = Series(values).sort_values()
ln = len(sorted_series)
if ln:
# construct groups of consecutive rows
diff = sorted_series.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = sorted_series.take(range(g, pg))
table.remove_rows(
start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
)
pg = g
self.table.flush()
# return the number of rows removed
return ln
class AppendableFrameTable(AppendableTable):
""" support the new appendable table formats """
pandas_kind = "frame_table"
table_type = "appendable_frame"
ndim = 2
obj_type: Type[FrameOrSeriesUnion] = DataFrame
@property
def is_transposed(self) -> bool:
return self.index_axes[0].axis == 1
@classmethod
def get_object(cls, obj, transposed: bool):
""" these are written transposed """
if transposed:
obj = obj.T
return obj
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return None
result = self._read_axes(where=where, start=start, stop=stop)
info = (
self.info.get(self.non_index_axes[0][0], {})
if len(self.non_index_axes)
else {}
)
inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]]
assert len(inds) == 1
ind = inds[0]
index = result[ind][0]
frames = []
for i, a in enumerate(self.axes):
if a not in self.values_axes:
continue
index_vals, cvalues = result[i]
# we could have a multi-index constructor here
# ensure_index doesn't recognize our list-of-tuples here
if info.get("type") == "MultiIndex":
cols = MultiIndex.from_tuples(index_vals)
else:
cols = Index(index_vals)
names = info.get("names")
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, "name", None))
else:
values = cvalues.T
index_ = Index(index, name=getattr(index, "name", None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
if isinstance(values, np.ndarray):
df = DataFrame(values.T, columns=cols_, index=index_)
elif isinstance(values, Index):
df = DataFrame(values, columns=cols_, index=index_)
else:
# Categorical
df = DataFrame([values], columns=cols_, index=index_)
assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)
frames.append(df)
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
selection = Selection(self, where=where, start=start, stop=stop)
# apply the selection filters & axis orderings
df = self.process_axes(df, selection=selection, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = "series_table"
table_type = "appendable_series"
ndim = 2
obj_type = Series
@property
def is_transposed(self) -> bool:
return False
@classmethod
def get_object(cls, obj, transposed: bool):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or "values"
obj = obj.to_frame(name)
return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
) -> Series:
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
assert isinstance(self.levels, list) # needed for mypy
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super().read(where=where, columns=columns, start=start, stop=stop)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == "values":
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = "series_table"
table_type = "appendable_multiseries"
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or "values"
newobj, self.levels = self.validate_multiindex(obj)
assert isinstance(self.levels, list) # for mypy
cols = list(self.levels)
cols.append(name)
newobj.columns = Index(cols)
return super().write(obj=newobj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = "frame_table"
table_type = "generic_table"
ndim = 2
obj_type = DataFrame
levels: List[Hashable]
@property
def pandas_type(self) -> str:
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, "table", None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a for a in self.indexables if a.is_an_indexable]
self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@cache_readonly
def indexables(self):
""" create the indexables from the table description """
d = self.description
# TODO: can we get a typ for this? AFAICT it is the only place
# where we aren't passing one
# the index column is just a simple index
md = self.read_metadata("index")
meta = "category" if md is not None else None
index_col = GenericIndexCol(
name="index", axis=0, table=self.table, meta=meta, metadata=md
)
_indexables: List[Union[GenericIndexCol, GenericDataIndexableCol]] = [index_col]
for i, n in enumerate(d._v_names):
assert isinstance(n, str)
atom = getattr(d, n)
md = self.read_metadata(n)
meta = "category" if md is not None else None
dc = GenericDataIndexableCol(
name=n,
pos=i,
values=[n],
typ=atom,
table=self.table,
meta=meta,
metadata=md,
)
_indexables.append(dc)
return _indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = "appendable_multiframe"
obj_type = DataFrame
ndim = 2
_re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self) -> str:
return "appendable_multi"
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
assert isinstance(self.levels, list) # for mypy
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super().write(obj=obj, data_columns=data_columns, **kwargs)
def read(
self,
where=None,
columns=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
df = super().read(where=where, columns=columns, start=start, stop=stop)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names(
[None if self._re_levels.search(name) else name for name in df.index.names]
)
return df
def _reindex_axis(obj: DataFrame, axis: int, labels: Index, other=None) -> DataFrame:
ax = obj._get_axis(axis)
labels = ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = ensure_index(labels.unique())
if other is not None:
labels = ensure_index(other.unique()).intersection(labels, sort=False)
if not labels.equals(ax):
slicer: List[Union[slice, Index]] = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
# tz to/from coercion
def _get_tz(tz: tzinfo) -> Union[str, tzinfo]:
""" for a tz-aware type, return an encoded zone """
zone = timezones.get_timezone(tz)
return zone
def _set_tz(
values: Union[np.ndarray, Index],
tz: Optional[Union[str, tzinfo]],
coerce: bool = False,
) -> Union[np.ndarray, DatetimeIndex]:
"""
coerce the values to a DatetimeIndex if tz is set;
preserve the input shape if possible
Parameters
----------
values : ndarray or Index
tz : str or tzinfo
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if isinstance(values, DatetimeIndex):
# If values is tzaware, the tz gets dropped in the values.ravel()
# call below (which returns an ndarray). So we are only non-lossy
# if `tz` matches `values.tz`.
assert values.tz is None or values.tz == tz
if tz is not None:
if isinstance(values, DatetimeIndex):
name = values.name
values = values.asi8
else:
name = None
values = values.ravel()
tz = _ensure_decoded(tz)
values = DatetimeIndex(values, name=name)
values = values.tz_localize("UTC").tz_convert(tz)
elif coerce:
values = np.asarray(values, dtype="M8[ns]")
return values
def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol:
assert isinstance(name, str)
index_name = index.name
converted, dtype_name = _get_data_and_dtype_name(index)
kind = _dtype_to_kind(dtype_name)
atom = DataIndexableCol._get_atom(converted)
if isinstance(index, Int64Index) or needs_i8_conversion(index.dtype):
# Includes Int64Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
# in which case "kind" is "integer", "integer", "datetime64",
# "timedelta64", and "integer", respectively.
return IndexCol(
name,
values=converted,
kind=kind,
typ=atom,
freq=getattr(index, "freq", None),
tz=getattr(index, "tz", None),
index_name=index_name,
)
if isinstance(index, MultiIndex):
raise TypeError("MultiIndex not supported here!")
inferred_type = lib.infer_dtype(index, skipna=False)
# we won't get inferred_type of "datetime64" or "timedelta64" as these
# would go through the DatetimeIndex/TimedeltaIndex paths above
values = np.asarray(index)
if inferred_type == "date":
converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
return IndexCol(
name, converted, "date", _tables().Time32Col(), index_name=index_name
)
elif inferred_type == "string":
converted = _convert_string_array(values, encoding, errors)
itemsize = converted.dtype.itemsize
return IndexCol(
name,
converted,
"string",
_tables().StringCol(itemsize),
index_name=index_name,
)
elif inferred_type in ["integer", "floating"]:
return IndexCol(
name, values=converted, kind=kind, typ=atom, index_name=index_name
)
else:
assert isinstance(converted, np.ndarray) and converted.dtype == object
assert kind == "object", kind
atom = _tables().ObjectAtom()
return IndexCol(name, converted, kind, atom, index_name=index_name)
def _unconvert_index(
data, kind: str, encoding: str, errors: str
) -> Union[np.ndarray, Index]:
index: Union[Index, np.ndarray]
if kind == "datetime64":
index = DatetimeIndex(data)
elif kind == "timedelta64":
index = TimedeltaIndex(data)
elif kind == "date":
try:
index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
except ValueError:
index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
elif kind in ("integer", "float"):
index = np.asarray(data)
elif kind in ("string"):
index = _unconvert_string_array(
data, nan_rep=None, encoding=encoding, errors=errors
)
elif kind == "object":
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError(f"unrecognized index type {kind}")
return index
def _maybe_convert_for_string_atom(
name: str,
block: Block,
existing_col,
min_itemsize,
nan_rep,
encoding,
errors,
block_columns: List[str],
):
if not block.is_object:
return block.values
dtype_name = block.dtype.name
inferred_type = lib.infer_dtype(block.values, skipna=False)
if inferred_type == "date":
raise TypeError("[date] is not implemented as a table column")
elif inferred_type == "datetime":
# after GH#8260
# this only would be hit for a multi-timezone dtype which is an error
raise TypeError(
"too many timezones in this block, create separate data columns"
)
elif not (inferred_type == "string" or dtype_name == "object"):
return block.values
blocks: List[Block] = block.fillna(nan_rep, downcast=False)
# Note: because block is always object dtype, fillna goes
# through a path such that the result is always a 1-element list
assert len(blocks) == 1
block = blocks[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data, skipna=False)
if inferred_type != "string":
# we cannot serialize this data, so report an exception on a column
# by column basis
# expected behaviour:
# search block for a non-string object column by column
for i in range(block.shape[0]):
col = block.iget(i)
inferred_type = lib.infer_dtype(col, skipna=False)
if inferred_type != "string":
error_column_label = (
block_columns[i] if len(block_columns) > i else f"No.{i}"
)
raise TypeError(
f"Cannot serialize the column [{error_column_label}]\n"
f"because its data contents are not [string] but "
f"[{inferred_type}] object dtype"
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
assert data_converted.shape == block.shape, (data_converted.shape, block.shape)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for column in the values conflicts
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
return data_converted
def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:
"""
Take a string-like that is object dtype and coerce to a fixed size string type.
Parameters
----------
data : np.ndarray[object]
encoding : str
errors : str
Handler for encoding errors.
Returns
-------
np.ndarray[fixed-length-string]
"""
# encode if needed
if len(data):
data = (
Series(data.ravel())
.str.encode(encoding, errors)
._values.reshape(data.shape)
)
# create the sized dtype
ensured = ensure_object(data.ravel())
itemsize = max(1, libwriters.max_len_string_array(ensured))
data = np.asarray(data, dtype=f"S{itemsize}")
return data
def _unconvert_string_array(
data: np.ndarray, nan_rep, encoding: str, errors: str
) -> np.ndarray:
"""
Inverse of _convert_string_array.
Parameters
----------
data : np.ndarray[fixed-length-string]
nan_rep : the storage repr of NaN
encoding : str
errors : str
Handler for encoding errors.
Returns
-------
np.ndarray[object]
Decoded data.
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
if len(data):
itemsize = libwriters.max_len_string_array(ensure_object(data))
dtype = f"U{itemsize}"
if isinstance(data[0], bytes):
data = Series(data).str.decode(encoding, errors=errors)._values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = "nan"
data = libwriters.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str):
assert isinstance(val_kind, str), type(val_kind)
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding, errors)
values = conv(values)
return values
def _get_converter(kind: str, encoding: str, errors: str):
if kind == "datetime64":
return lambda x: np.asarray(x, dtype="M8[ns]")
elif kind == "string":
return lambda x: _unconvert_string_array(
x, nan_rep=None, encoding=encoding, errors=errors
)
else: # pragma: no cover
raise ValueError(f"invalid kind {kind}")
def _need_convert(kind: str) -> bool:
if kind in ("datetime64", "string"):
return True
return False
def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
"""
Prior to 0.10.1, we named values blocks like values_block_0 and the
name values_0; adjust the given name if necessary.
Parameters
----------
name : str
version : Tuple[int, int, int]
Returns
-------
str
"""
if isinstance(version, str) or len(version) < 3:
raise ValueError("Version is incorrect, expected sequence of 3 integers.")
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
grp = m.groups()[0]
name = f"values_{grp}"
return name
def _dtype_to_kind(dtype_str: str) -> str:
"""
Find the "kind" string describing the given dtype name.
"""
dtype_str = _ensure_decoded(dtype_str)
if dtype_str.startswith("string") or dtype_str.startswith("bytes"):
kind = "string"
elif dtype_str.startswith("float"):
kind = "float"
elif dtype_str.startswith("complex"):
kind = "complex"
elif dtype_str.startswith("int") or dtype_str.startswith("uint"):
kind = "integer"
elif dtype_str.startswith("datetime64"):
kind = "datetime64"
elif dtype_str.startswith("timedelta"):
kind = "timedelta64"
elif dtype_str.startswith("bool"):
kind = "bool"
elif dtype_str.startswith("category"):
kind = "category"
elif dtype_str.startswith("period"):
# We store the `freq` attr so we can restore from integers
kind = "integer"
elif dtype_str == "object":
kind = "object"
else:
raise ValueError(f"cannot interpret dtype of [{dtype_str}]")
return kind
def _get_data_and_dtype_name(data: ArrayLike):
"""
Convert the passed data into a storable form and a dtype string.
"""
if isinstance(data, Categorical):
data = data.codes
# For datetime64tz we need to drop the TZ in tests TODO: why?
dtype_name = data.dtype.name.split("[")[0]
if data.dtype.kind in ["m", "M"]:
data = np.asarray(data.view("i8"))
# TODO: we used to reshape for the dt64tz case, but no longer
# doing that doesn't seem to break anything. why?
elif isinstance(data, PeriodIndex):
data = data.asi8
data = np.asarray(data)
return data, dtype_name
class Selection:
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
where : list of Terms (or convertible to)
start, stop: indices to start and/or stop selection
"""
def __init__(
self,
table: Table,
where=None,
start: Optional[int] = None,
stop: Optional[int] = None,
):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
with suppress(ValueError):
inferred = lib.infer_dtype(where, skipna=False)
if inferred == "integer" or inferred == "boolean":
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if (self.start is not None and (where < self.start).any()) or (
self.stop is not None and (where >= self.stop).any()
):
raise ValueError(
"where must have index locations >= start and < stop"
)
self.coordinates = where
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
except NameError as err:
# raise a nice message, suggesting that the user should use
# data_columns
qkeys = ",".join(q.keys())
msg = dedent(
f"""\
The passed where expression: {where}
contains an invalid variable reference
all of the variable references must be a reference to
an axis (e.g. 'index' or 'columns'), or a data_column
The currently defined references are: {qkeys}
"""
)
raise ValueError(msg) from err
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(
self.condition.format(), start=self.start, stop=self.stop
)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(
self.condition.format(), start=start, stop=stop, sort=True
)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
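# A hedged usage sketch (not part of this module): the Table/Selection machinery
# above backs pandas' HDFStore "table" format. The file name and column names
# below are illustrative assumptions; the HDFStore methods used (put, append,
# create_table_index, select, select_column) are the public entry points that
# end up calling the classes defined here. Requires the PyTables package.
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": np.arange(5), "B": list("abcde")})
with pd.HDFStore("demo.h5") as store:
    store.put("df", df, format="table", data_columns=["A"])  # writes an AppendableFrameTable
    store.append("df", df)                                    # appends rows to the same table
    store.create_table_index("df", columns=["A"], optlevel=9, kind="full")  # Table.create_index
    subset = store.select("df", where="A > 2")                # goes through Selection/queryables
    col_a = store.select_column("df", "A")                    # Table.read_column on a data column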
|
bsd-3-clause
|
jjx02230808/project0223
|
examples/ensemble/plot_isolation_forest.py
|
65
|
2363
|
"""
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of abnormality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produces shorter path lengths
for particular samples, those samples are highly likely to be anomalies.
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# plot the decision function contours and the samples
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
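# Small hedged follow-on to the example above (reuses the fitted `clf`): the
# decision function exposes the averaged path-length score described in the
# docstring, with lower values meaning "more anomalous"; predict() binarizes
# this into -1 (anomaly) / +1 (inlier).
outlier_scores = clf.decision_function(X_outliers)  # lower = more isolated = more anomalous
outlier_labels = clf.predict(X_outliers)             # -1 for predicted anomalies
print("mean score of generated outliers:", outlier_scores.mean())
print("fraction predicted as anomalies:", (outlier_labels == -1).mean())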
|
bsd-3-clause
|
stylianos-kampakis/scikit-learn
|
examples/linear_model/plot_sgd_comparison.py
|
77
|
1820
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
bsd-3-clause
|
msmbuilder/msmbuilder
|
msmbuilder/lumping/pcca.py
|
3
|
4402
|
from __future__ import print_function, division, absolute_import
import numpy as np
from ..msm import MarkovStateModel
class PCCA(MarkovStateModel):
"""Perron Cluster Cluster Analysis (PCCA) for coarse-graining (lumping)
microstates into macrostates.
Parameters
----------
n_macrostates : int
The desired number of macrostates in the lumped model.
kwargs : optional
Additional keyword arguments to be passed to MarkovStateModel. See
msmbuilder.msm.MarkovStateModel for possible options.
Notes
-----
PCCA is a subclass of MarkovStateModel. However, the MSM properties
and attributes on PCCA refer to the MICROSTATE properties--e.g.
pcca.transmat_ is the microstate transition matrix. To get the
macrostate transition matrix, you must fit a new MarkovStateModel
object on the output (assignments) of PCCA().
"""
def __init__(self, n_macrostates, objective_function=None,
pcca_tolerance=1e-5, **kwargs):
self.n_macrostates = n_macrostates
self.objective_function = objective_function
if self.objective_function is not None:
raise AttributeError("PCCA does not use an objective function")
self.pcca_tolerance = pcca_tolerance
super(PCCA, self).__init__(**kwargs)
def fit(self, sequences, y=None):
"""Fit a PCCA lumping model using a sequence of cluster assignments.
Parameters
----------
sequences : list(np.ndarray(dtype='int'))
List of arrays of cluster assignments
y : None
Unused, present for sklearn compatibility only.
Returns
-------
self
"""
super(PCCA, self).fit(sequences, y=y)
self._do_lumping()
return self
def _do_lumping(self):
"""Do the PCCA lumping.
Notes
-------
1. Iterate over the eigenvectors, starting with the slowest.
2. Calculate the spread of that eigenvector within each existing
macrostate.
3. Pick the macrostate with the largest eigenvector spread.
4. Split the macrostate based on the sign of the eigenvector.
"""
# Extract non-perron eigenvectors
right_eigenvectors = self.right_eigenvectors_[:, 1:]
assert self.n_states_ > 0
microstate_mapping = np.zeros(self.n_states_, dtype=int)
def spread(x):
return x.max() - x.min()
for i in range(self.n_macrostates - 1):
v = right_eigenvectors[:, i]
all_spreads = np.array([spread(v[microstate_mapping == k])
for k in range(i + 1)])
state_to_split = np.argmax(all_spreads)
inds = ((microstate_mapping == state_to_split) &
(v >= self.pcca_tolerance))
microstate_mapping[inds] = i + 1
self.microstate_mapping_ = microstate_mapping
def partial_transform(self, sequence, mode='clip'):
trimmed_sequence = super(PCCA, self).partial_transform(sequence, mode)
if mode == 'clip':
return [self.microstate_mapping_[seq] for seq in trimmed_sequence]
elif mode == 'fill':
def nan_get(x):
try:
x = int(x)
return self.microstate_mapping_[x]
except ValueError:
return np.nan
return np.asarray([nan_get(x) for x in trimmed_sequence])
else:
raise ValueError
@classmethod
def from_msm(cls, msm, n_macrostates, objective_function=None):
"""Create and fit lumped model from pre-existing MSM.
Parameters
----------
msm : MarkovStateModel
The input microstate msm to use.
n_macrostates : int
The number of macrostates
Returns
-------
lumper : cls
The fit PCCA(+) object.
"""
params = msm.get_params()
lumper = cls(n_macrostates=n_macrostates,
objective_function=objective_function, **params)
lumper.transmat_ = msm.transmat_
lumper.populations_ = msm.populations_
lumper.mapping_ = msm.mapping_
lumper.countsmat_ = msm.countsmat_
lumper.n_states_ = msm.n_states_
lumper._do_lumping()
return lumper
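# Hedged usage sketch (the synthetic microstate trajectories below are an
# illustrative assumption; the methods used -- fit, partial_transform,
# MarkovStateModel -- are defined above or in msmbuilder.msm). As the class
# docstring notes, macrostate kinetics require fitting a *new* MarkovStateModel
# on the lumped assignments.
if __name__ == "__main__":
    import numpy as np
    from msmbuilder.msm import MarkovStateModel

    rng = np.random.RandomState(0)
    micro_assignments = [rng.randint(0, 10, size=500) for _ in range(3)]  # fake cluster labels
    pcca = PCCA(n_macrostates=3, lag_time=1).fit(micro_assignments)
    # partial_transform(mode='clip') returns a list of trimmed segments; take the
    # first segment of each trajectory for this simple sketch.
    macro_assignments = [pcca.partial_transform(seq, mode='clip')[0]
                         for seq in micro_assignments]
    macro_msm = MarkovStateModel(lag_time=1).fit(macro_assignments)
    print(macro_msm.transmat_)  # macrostate transition matrix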
|
lgpl-2.1
|
bhargav/scikit-learn
|
sklearn/ensemble/tests/test_partial_dependence.py
|
365
|
6996
|
"""
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
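# Illustrative sketch (not part of the original test module, meant to be run
# as a standalone snippet): minimal direct usage of the two APIs exercised by
# the tests above. The import path (the deprecated sklearn.ensemble
# partial_dependence module) and the Boston dataset mirror the tests and are
# assumptions for this example only.
from sklearn.datasets import load_boston
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import partial_dependence as _pd
from sklearn.ensemble.partial_dependence import plot_partial_dependence as _ppd
_boston = load_boston()
_est = GradientBoostingRegressor(n_estimators=10, random_state=1)
_est.fit(_boston.data, _boston.target)
# One-way partial dependence of feature 0 on a 25-point grid; axes[0] holds
# the grid values for that feature (length grid_resolution, as asserted above).
_pdp, _axes = _pd(_est, [0], X=_boston.data, grid_resolution=25)
# Plot features 0 and 1 plus their two-way interaction, as in the tests above.
_fig, _axs = _ppd(_est, _boston.data, [0, 1, (0, 1)],
                  feature_names=_boston.feature_names, grid_resolution=25)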
|
bsd-3-clause
|
kevin-intel/scikit-learn
|
sklearn/inspection/_permutation_importance.py
|
3
|
9123
|
"""Permutation importance for estimators."""
import numpy as np
from joblib import Parallel
from ..metrics import check_scoring
from ..metrics._scorer import _check_multimetric_scoring, _MultimetricScorer
from ..model_selection._validation import _aggregate_score_dicts
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import check_array
from ..utils.fixes import delayed
def _weights_scorer(scorer, estimator, X, y, sample_weight):
if sample_weight is not None:
return scorer(estimator, X, y, sample_weight)
return scorer(estimator, X, y)
def _calculate_permutation_scores(estimator, X, y, sample_weight, col_idx,
random_state, n_repeats, scorer):
"""Calculate score when `col_idx` is permuted."""
random_state = check_random_state(random_state)
# Work on a copy of X to ensure thread-safety in case of threading-based
# parallelism. Furthermore, making a copy is also useful when the joblib
# backend is 'loky' (default) or the old 'multiprocessing': in those cases,
# if X is large it will automatically be backed by a read-only memory map
# (memmap). X.copy(), on the other hand, is always guaranteed to return a
# writable data structure whose columns can be shuffled in place.
X_permuted = X.copy()
scores = []
shuffling_idx = np.arange(X.shape[0])
for _ in range(n_repeats):
random_state.shuffle(shuffling_idx)
if hasattr(X_permuted, "iloc"):
col = X_permuted.iloc[shuffling_idx, col_idx]
col.index = X_permuted.index
X_permuted.iloc[:, col_idx] = col
else:
X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx]
scores.append(
_weights_scorer(scorer, estimator, X_permuted, y, sample_weight)
)
if isinstance(scores[0], dict):
scores = _aggregate_score_dicts(scores)
else:
scores = np.array(scores)
return scores
def _create_importances_bunch(baseline_score, permuted_score):
"""Compute the importances as the decrease in score.
Parameters
----------
baseline_score : ndarray of shape (n_features,)
The baseline score without permutation.
permuted_score : ndarray of shape (n_features, n_repeats)
The permuted scores for the `n` repetitions.
Returns
-------
importances : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
importances_mean : ndarray, shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray, shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray, shape (n_features, n_repeats)
Raw permutation importance scores.
"""
importances = baseline_score - permuted_score
return Bunch(importances_mean=np.mean(importances, axis=1),
importances_std=np.std(importances, axis=1),
importances=importances)
def permutation_importance(estimator, X, y, *, scoring=None, n_repeats=5,
n_jobs=None, random_state=None, sample_weight=None):
"""Permutation importance for feature evaluation [BRE]_.
The :term:`estimator` is required to be a fitted estimator. `X` can be the
data set used to train the estimator or a hold-out set. The permutation
importance of a feature is calculated as follows. First, a baseline metric,
defined by :term:`scoring`, is evaluated on a (potentially different)
dataset defined by `X`. Next, a feature column from the validation set
is permuted and the metric is evaluated again. The permutation importance
is defined to be the difference between the baseline metric and the metric
from permuting the feature column.
Read more in the :ref:`User Guide <permutation_importance>`.
Parameters
----------
estimator : object
An estimator that has already been :term:`fitted` and is compatible
with :term:`scorer`.
X : ndarray or DataFrame, shape (n_samples, n_features)
Data on which permutation importance will be computed.
y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
Targets for supervised learning or `None` for unsupervised.
scoring : str, callable, list, tuple, or dict, default=None
Scorer to use.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_parameter`);
- a callable (see :ref:`scoring`) that returns a single value.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables as values.
Passing multiple scores to `scoring` is more efficient than calling
`permutation_importance` for each of the scores as it reuses
predictions to avoid redundant computation.
If None, the estimator's default scorer is used.
n_repeats : int, default=5
Number of times to permute a feature.
n_jobs : int or None, default=None
Number of jobs to run in parallel. The computation is done by computing
the permutation score for each column and is parallelized over the columns.
`None` means 1 unless in a :obj:`joblib.parallel_backend` context.
`-1` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
See :term:`Glossary <random_state>`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights used in scoring.
.. versionadded:: 0.24
Returns
-------
result : :class:`~sklearn.utils.Bunch` or dict of such instances
Dictionary-like object, with the following attributes.
importances_mean : ndarray of shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray of shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray of shape (n_features, n_repeats)
Raw permutation importance scores.
If there are multiple scoring metrics in the scoring parameter,
`result` is a dict with scorer names as keys (e.g. 'roc_auc') and
`Bunch` objects like above as values.
References
----------
.. [BRE] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
2001. https://doi.org/10.1023/A:1010933404324
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import permutation_importance
>>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
... [0, 9, 9],[0, 9, 9],[0, 9, 9]]
>>> y = [1, 1, 1, 0, 0, 0]
>>> clf = LogisticRegression().fit(X, y)
>>> result = permutation_importance(clf, X, y, n_repeats=10,
... random_state=0)
>>> result.importances_mean
array([0.4666..., 0. , 0. ])
>>> result.importances_std
array([0.2211..., 0. , 0. ])
"""
if not hasattr(X, "iloc"):
X = check_array(X, force_all_finite='allow-nan', dtype=None)
# Precompute random seed from the random state to be used
# to get a fresh independent RandomState instance for each
# parallel call to _calculate_permutation_scores, irrespective of
# the fact that variables are shared or not depending on the active
# joblib backend (sequential, thread-based or process-based).
random_state = check_random_state(random_state)
random_seed = random_state.randint(np.iinfo(np.int32).max + 1)
if callable(scoring):
scorer = scoring
elif scoring is None or isinstance(scoring, str):
scorer = check_scoring(estimator, scoring=scoring)
else:
scorers_dict = _check_multimetric_scoring(estimator, scoring)
scorer = _MultimetricScorer(**scorers_dict)
baseline_score = _weights_scorer(scorer, estimator, X, y,
sample_weight)
scores = Parallel(n_jobs=n_jobs)(
delayed(_calculate_permutation_scores)(
estimator, X, y, sample_weight, col_idx, random_seed,
n_repeats, scorer
) for col_idx in range(X.shape[1]))
if isinstance(baseline_score, dict):
return {
name: _create_importances_bunch(
baseline_score[name],
# unpack the permuted scores
np.array([
scores[col_idx][name] for col_idx in range(X.shape[1])
])
)
for name in baseline_score
}
else:
return _create_importances_bunch(baseline_score, np.array(scores))
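# Illustrative sketch (not part of the module): multi-metric usage of
# `permutation_importance`. When `scoring` is a list of scorer names, the
# result is a dict mapping each scorer name to a Bunch, as documented above.
# The dataset and the scorer names used below are assumptions for this
# example only.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
X_demo, y_demo = make_classification(n_samples=200, n_features=5,
                                     random_state=0)
clf_demo = LogisticRegression().fit(X_demo, y_demo)
result_demo = permutation_importance(clf_demo, X_demo, y_demo,
                                      scoring=["accuracy", "roc_auc"],
                                      n_repeats=10, random_state=0)
# One Bunch per scorer; each carries importances_mean, importances_std and
# the raw (n_features, n_repeats) importances array.
acc_importances = result_demo["accuracy"].importances_mean  # shape (5,)
auc_importances = result_demo["roc_auc"].importances_std    # shape (5,)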
|
bsd-3-clause
|
tridesclous/tridesclous
|
tridesclous/gui/tests/test_peelerwindow.py
|
1
|
4059
|
from tridesclous import *
from matplotlib import pyplot
import time
import pytest
from tridesclous.tests.testingtools import ON_CI_CLOUD, setup_catalogue
from tridesclous.gui.tests.testingguitools import HAVE_QT5
if HAVE_QT5:
import pyqtgraph as pg
from tridesclous.gui import *
def setup_module():
dirname = 'test_peelerwindow'
setup_catalogue(dirname, dataset_name='olfactory_bulb')
dataio = DataIO(dirname=dirname)
initial_catalogue = dataio.load_catalogue(chan_grp=0)
peeler = Peeler(dataio)
peeler.change_params(catalogue=initial_catalogue, engine='geometrical',
chunksize=1024)
t1 = time.perf_counter()
peeler.run(progressbar=False)
t2 = time.perf_counter()
print('peeler.run_loop', t2-t1)
def get_controller():
dataio = DataIO(dirname='test_peelerwindow')
catalogueconstructor = CatalogueConstructor(dataio=dataio)
initial_catalogue = dataio.load_catalogue()
controller = PeelerController(dataio=dataio,catalogue=initial_catalogue)
return controller
def test_Peelercontroller():
controller = get_controller()
assert controller.cluster_labels is not None
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_PeelerTraceViewer():
controller = get_controller()
app = pg.mkQApp()
traceviewer = PeelerTraceViewer(controller=controller)
traceviewer.show()
traceviewer.resize(800,600)
if __name__ == '__main__':
app.exec_()
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_SpikeList():
controller = get_controller()
app = pg.mkQApp()
traceviewer = SpikeList(controller)
traceviewer.show()
traceviewer.resize(800,600)
if __name__ == '__main__':
app.exec_()
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_ClusterSpikeList():
controller = get_controller()
app = pg.mkQApp()
traceviewer = ClusterSpikeList(controller)
traceviewer.show()
traceviewer.resize(800,600)
if __name__ == '__main__':
app.exec_()
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_PeelerWaveformViewer():
controller = get_controller()
app = pg.mkQApp()
traceviewer = PeelerWaveformViewer(controller)
traceviewer.show()
traceviewer.resize(800,600)
if __name__ == '__main__':
app.exec_()
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_ISIViewer():
controller = get_controller()
for k in controller.cluster_labels:
controller.cluster_visible[k] = False
for k in controller.cluster_labels[3:6]:
controller.cluster_visible[k] = True
#~ print(controller.cluster_visible)
app = pg.mkQApp()
isiviewer = ISIViewer(controller)
isiviewer.show()
isiviewer.refresh()
if __name__ == '__main__':
app.exec_()
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_CrossCorrelogramViewer():
controller = get_controller()
for k in controller.cluster_labels:
controller.cluster_visible[k] = False
for k in controller.cluster_labels[3:6]:
controller.cluster_visible[k] = True
#~ print(controller.cluster_visible)
app = pg.mkQApp()
ccgviewer = CrossCorrelogramViewer(controller)
ccgviewer.show()
ccgviewer.refresh()
if __name__ == '__main__':
app.exec_()
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_PeelerWindow():
dataio = DataIO(dirname='test_peelerwindow')
initial_catalogue = dataio.load_catalogue(chan_grp=0)
app = pg.mkQApp()
win = PeelerWindow(dataio=dataio, catalogue=initial_catalogue)
win.show()
if __name__ == '__main__':
app.exec_()
if __name__ == '__main__':
#~ setup_module()
test_Peelercontroller()
test_PeelerTraceViewer()
test_SpikeList()
test_ClusterSpikeList()
test_PeelerWaveformViewer()
test_ISIViewer()
test_CrossCorrelogramViewer()
test_PeelerWindow()
|
mit
|
jart/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
|
30
|
70017
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replicas to be 10 and the min slice size to be extremely small,
# so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
# Occasionally, step increments one more time due to a race condition,
# reaching 51 steps.
self.assertIn(step_counter.steps, [50, 51])
else:
# Occasionally, training stops when global_step == 102, due to a race
# condition. In addition, occasionally step increments one more time due
# to a race condition reaching 52 steps.
self.assertIn(step_counter.steps, [51, 52])
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
DonBeo/statsmodels
|
statsmodels/graphics/utils.py
|
26
|
4138
|
"""Helper functions for graphics with Matplotlib."""
from statsmodels.compat.python import lrange, range
__all__ = ['create_mpl_ax', 'create_mpl_fig']
def _import_mpl():
"""This function is not needed outside this utils module."""
try:
import matplotlib.pyplot as plt
    except ImportError:
raise ImportError("Matplotlib is not found.")
return plt
def create_mpl_ax(ax=None):
"""Helper function for when a single plot axis is needed.
Parameters
----------
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
ax : Matplotlib AxesSubplot instance
The created axis if `ax` is None, otherwise the axis that was passed
in.
Notes
-----
This function imports `matplotlib.pyplot`, which should only be done to
create (a) figure(s) with ``plt.figure``. All other functionality exposed
by the pyplot module can and should be imported directly from its
Matplotlib module.
See Also
--------
create_mpl_fig
Examples
--------
    A plotting function that accepts a keyword argument ``ax=None`` then calls:
>>> from statsmodels.graphics import utils
>>> fig, ax = utils.create_mpl_ax(ax)
"""
if ax is None:
plt = _import_mpl()
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.figure
return fig, ax
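# Editor's sketch (illustrative only, not part of statsmodels): the usage
# pattern described in the docstring above. The function name and data are
# hypothetical; the point is that passing ax=None creates a new figure while
# passing an existing axis draws into it.
def _example_plot_series(y, ax=None):
    fig, ax = create_mpl_ax(ax)
    ax.plot(y)
    ax.set_xlabel('index')
    return fig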
def create_mpl_fig(fig=None, figsize=None):
"""Helper function for when multiple plot axes are needed.
Those axes should be created in the functions they are used in, with
``fig.add_subplot()``.
Parameters
----------
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise the input `fig` is
returned.
See Also
--------
create_mpl_ax
"""
if fig is None:
plt = _import_mpl()
fig = plt.figure(figsize=figsize)
return fig
def maybe_name_or_idx(idx, model):
"""
Give a name or an integer and return the name and integer location of the
column in a design matrix.
"""
if idx is None:
idx = lrange(model.exog.shape[1])
if isinstance(idx, int):
exog_name = model.exog_names[idx]
exog_idx = idx
# anticipate index as list and recurse
elif isinstance(idx, (tuple, list)):
exog_name = []
exog_idx = []
for item in idx:
exog_name_item, exog_idx_item = maybe_name_or_idx(item, model)
exog_name.append(exog_name_item)
exog_idx.append(exog_idx_item)
else: # assume we've got a string variable
exog_name = idx
exog_idx = model.exog_names.index(idx)
return exog_name, exog_idx
def get_data_names(series_or_dataframe):
"""
Input can be an array or pandas-like. Will handle 1d array-like but not
2d. Returns a str for 1d data or a list of strings for 2d data.
"""
names = getattr(series_or_dataframe, 'name', None)
if not names:
names = getattr(series_or_dataframe, 'columns', None)
if not names:
shape = getattr(series_or_dataframe, 'shape', [1])
        nvars = 1 if len(shape) == 1 else series_or_dataframe.shape[1]
        names = ["X%d" % i for i in range(nvars)]
if nvars == 1:
names = names[0]
else:
names = names.tolist()
return names
def annotate_axes(index, labels, points, offset_points, size, ax, **kwargs):
"""
Annotate Axes with labels, points, offset_points according to the
given index.
"""
for i in index:
label = labels[i]
point = points[i]
offset = offset_points[i]
ax.annotate(label, point, xytext=offset, textcoords="offset points",
size=size, **kwargs)
return ax
|
bsd-3-clause
|
ch3ll0v3k/scikit-learn
|
sklearn/feature_extraction/image.py
|
263
|
17600
|
"""
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
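# Editor's sketch (illustrative only, not part of scikit-learn): a minimal
# call to img_to_graph on a tiny image. The helper name is hypothetical and
# only meant to show the shape of the returned adjacency matrix.
def _img_to_graph_example():
    img = np.arange(9, dtype=np.float64).reshape(3, 3)
    graph = img_to_graph(img)     # sparse COO matrix of shape (9, 9)
    # edges connect 4-neighbours and are weighted by intensity differences;
    # the diagonal stores the pixel values themselves
    return graph.shape, graph.nnz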
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
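# Editor's sketch (illustrative only): how max_patches is interpreted by the
# helper above. For a 10x10 image and 3x3 patches there are 8 * 8 = 64
# possible patches; an int caps the count, a float in (0, 1) is a fraction.
def _n_patches_example():
    full = _compute_n_patches(10, 10, 3, 3)                       # 64
    capped = _compute_n_patches(10, 10, 3, 3, max_patches=10)     # 10
    half = _compute_n_patches(10, 10, 3, 3, max_patches=0.5)      # 32
    return full, capped, half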
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
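# Editor's sketch (illustrative only): using the strided view returned by
# extract_patches and materialising a flat list of patches with the reshape
# mentioned in the docstring. The helper name is hypothetical.
def _extract_patches_example():
    arr = np.arange(16).reshape(4, 4)
    view = extract_patches(arr, patch_shape=2, extraction_step=1)
    # view.shape == (3, 3, 2, 2): 3 x 3 patch positions, each patch 2 x 2
    patches = view.reshape([-1, 2, 2])      # this reshape copies the data
    return patches.shape                    # (9, 2, 2)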
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
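# Editor's sketch (illustrative only): a round trip through
# extract_patches_2d and reconstruct_from_patches_2d. When every patch is
# kept, averaging the overlapping regions recovers the original image.
def _reconstruct_round_trip_example():
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))       # shape (9, 2, 2)
    reconstructed = reconstruct_from_patches_2d(patches, (4, 4))
    return np.allclose(image, reconstructed)          # True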
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: an RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
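# Editor's sketch (illustrative only): PatchExtractor over a small batch of
# images. With max_patches=4 each image contributes 4 randomly sampled 2x2
# patches, so the result has shape (2 * 4, 2, 2). Names are hypothetical.
def _patch_extractor_example():
    images = np.arange(2 * 4 * 4, dtype=np.float64).reshape(2, 4, 4)
    extractor = PatchExtractor(patch_size=(2, 2), max_patches=4,
                               random_state=0)
    patches = extractor.transform(images)
    return patches.shape                    # (8, 2, 2)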
|
bsd-3-clause
|
cbertinato/pandas
|
pandas/core/config_init.py
|
1
|
17435
|
"""
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. if register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.
"""
import importlib
import pandas._config.config as cf
from pandas._config.config import (
is_bool, is_callable, is_instance_factory, is_int, is_one_of_factory,
is_text)
# compute
use_bottleneck_doc = """
: bool
Use the bottleneck library to accelerate if it is installed,
the default is True
Valid values: False,True
"""
def use_bottleneck_cb(key):
from pandas.core import nanops
nanops.set_use_bottleneck(cf.get_option(key))
use_numexpr_doc = """
: bool
Use the numexpr library to accelerate computation if it is installed,
the default is True
Valid values: False,True
"""
def use_numexpr_cb(key):
from pandas.core.computation import expressions
expressions.set_use_numexpr(cf.get_option(key))
with cf.config_prefix('compute'):
cf.register_option('use_bottleneck', True, use_bottleneck_doc,
validator=is_bool, cb=use_bottleneck_cb)
cf.register_option('use_numexpr', True, use_numexpr_doc,
validator=is_bool, cb=use_numexpr_cb)
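# Editor's sketch (illustrative only, never executed at import time): once
# registered above, these options are toggled through pandas' public
# set_option/get_option API, which this config module backs. The helper
# name is hypothetical.
def _example_toggle_compute_options():
    # equivalent to pd.set_option("compute.use_bottleneck", False)
    cf.set_option("compute.use_bottleneck", False)
    return cf.get_option("compute.use_bottleneck")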
#
# options from the "display" namespace
pc_precision_doc = """
: int
Floating point output precision (number of significant digits). This is
only a suggestion
"""
pc_colspace_doc = """
: int
Default space for DataFrame columns.
"""
pc_max_rows_doc = """
: int
If max_rows is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the height of the terminal and print a truncated object which fits
the screen height. The IPython notebook, IPython qtconsole, or
IDLE do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_cols_doc = """
: int
If max_cols is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the width of the terminal and print a truncated object which fits
the screen width. The IPython notebook, IPython qtconsole, or IDLE
do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_categories_doc = """
: int
This sets the maximum number of categories pandas should output when
printing out a `Categorical` or a Series of dtype "category".
"""
pc_max_info_cols_doc = """
: int
max_info_columns is used in DataFrame.info method to decide if
per column information will be printed.
"""
pc_nb_repr_h_doc = """
: boolean
When True, IPython notebook will use html representation for
pandas objects (if it is available).
"""
pc_pprint_nest_depth = """
: int
Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc = """
: boolean
"sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
float_format_doc = """
: callable
The callable should accept a floating point number and return
a string with the desired format of the number. This is used
in some places like SeriesFormatter.
See formats.format.EngFormatter for an example.
"""
max_colwidth_doc = """
: int
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
placeholder is embedded in the output.
"""
colheader_justify_doc = """
: 'left'/'right'
    Controls the justification of column headers. Used by DataFrameFormatter.
"""
pc_expand_repr_doc = """
: boolean
Whether to print out the full DataFrame repr for wide DataFrames across
multiple lines, `max_columns` is still respected, but the output will
wrap-around across multiple "pages" if its width exceeds `display.width`.
"""
pc_show_dimensions_doc = """
: boolean or 'truncate'
Whether to print out dimensions at the end of DataFrame repr.
If 'truncate' is specified, only print out the dimensions if the
frame is truncated (e.g. not display all rows and/or columns)
"""
pc_east_asian_width_doc = """
: boolean
Whether to use the Unicode East Asian Width to calculate the display text
width.
    Enabling this may affect performance (default: False)
"""
pc_ambiguous_as_wide_doc = """
: boolean
    Whether to handle Unicode characters belonging to Ambiguous as Wide (width=2)
(default: False)
"""
pc_latex_repr_doc = """
: boolean
Whether to produce a latex DataFrame representation for jupyter
environments that support it.
(default: False)
"""
pc_table_schema_doc = """
: boolean
Whether to publish a Table Schema representation for frontends
that support it.
(default: False)
"""
pc_html_border_doc = """
: int
A ``border=value`` attribute is inserted in the ``<table>`` tag
for the DataFrame HTML repr.
"""
pc_html_use_mathjax_doc = """\
: boolean
When True, Jupyter notebook will process table contents using MathJax,
rendering mathematical expressions enclosed by the dollar symbol.
(default: True)
"""
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
a terminal this can be set to None and pandas will correctly auto-detect
the width.
Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
terminal and hence it is not possible to correctly detect the width.
"""
pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
will be displayed as exactly 0 by repr and friends.
"""
pc_max_seq_items = """
: int or None
    when pretty-printing a long sequence, no more than `max_seq_items`
will be printed. If items are omitted, they will be denoted by the
addition of "..." to the resulting string.
If set to None, the number of items to be printed is unlimited.
"""
pc_max_info_rows_doc = """
: int or None
df.info() will usually show null-counts for each column.
For large frames this can be quite slow. max_info_rows and max_info_cols
limit this null check only to frames with smaller dimensions than
specified.
"""
pc_large_repr_doc = """
: 'truncate'/'info'
For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
show a truncated table (the default from 0.13), or switch to the view from
df.info() (the behaviour in earlier versions of pandas).
"""
pc_memory_usage_doc = """
: bool, string or None
This specifies if the memory usage of a DataFrame should be displayed when
df.info() is called. Valid values True,False,'deep'
"""
pc_latex_escape = """
: bool
    This specifies if the to_latex method of a Dataframe escapes special
characters.
Valid values: False,True
"""
pc_latex_longtable = """
:bool
This specifies if the to_latex method of a Dataframe uses the longtable
format.
Valid values: False,True
"""
pc_latex_multicolumn = """
: bool
This specifies if the to_latex method of a Dataframe uses multicolumns
to pretty-print MultiIndex columns.
Valid values: False,True
"""
pc_latex_multicolumn_format = """
: string
This specifies the format for multicolumn headers.
Can be surrounded with '|'.
Valid values: 'l', 'c', 'r', 'p{<width>}'
"""
pc_latex_multirow = """
: bool
This specifies if the to_latex method of a Dataframe uses multirows
to pretty-print MultiIndex rows.
Valid values: False,True
"""
def table_schema_cb(key):
from pandas.io.formats.printing import _enable_data_resource_formatter
_enable_data_resource_formatter(cf.get_option(key))
def is_terminal():
"""
Detect if Python is running in a terminal.
Returns True if Python is running in a terminal or False if not.
"""
try:
ip = get_ipython()
except NameError: # assume standard Python interpreter in a terminal
return True
else:
if hasattr(ip, 'kernel'): # IPython as a Jupyter kernel
return False
else: # IPython in a terminal
return True
with cf.config_prefix('display'):
cf.register_option('precision', 6, pc_precision_doc, validator=is_int)
cf.register_option('float_format', None, float_format_doc,
validator=is_one_of_factory([None, is_callable]))
cf.register_option('column_space', 12, validator=is_int)
cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc,
validator=is_instance_factory((int, type(None))))
cf.register_option('max_rows', 60, pc_max_rows_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('max_categories', 8, pc_max_categories_doc,
validator=is_int)
cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
if is_terminal():
max_cols = 0 # automatically determine optimal number of columns
else:
max_cols = 20 # cannot determine optimal number of columns
cf.register_option('max_columns', max_cols, pc_max_cols_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('large_repr', 'truncate', pc_large_repr_doc,
validator=is_one_of_factory(['truncate', 'info']))
cf.register_option('max_info_columns', 100, pc_max_info_cols_doc,
validator=is_int)
cf.register_option('colheader_justify', 'right', colheader_justify_doc,
validator=is_text)
cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
validator=is_bool)
cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
validator=is_int)
cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
validator=is_bool)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc,
validator=is_one_of_factory([True, False, 'truncate']))
cf.register_option('chop_threshold', None, pc_chop_threshold_doc)
cf.register_option('max_seq_items', 100, pc_max_seq_items)
cf.register_option('width', 80, pc_width_doc,
validator=is_instance_factory([type(None), int]))
cf.register_option('memory_usage', True, pc_memory_usage_doc,
validator=is_one_of_factory([None, True,
False, 'deep']))
cf.register_option('unicode.east_asian_width', False,
pc_east_asian_width_doc, validator=is_bool)
cf.register_option('unicode.ambiguous_as_wide', False,
                       pc_ambiguous_as_wide_doc, validator=is_bool)
cf.register_option('latex.repr', False,
pc_latex_repr_doc, validator=is_bool)
cf.register_option('latex.escape', True, pc_latex_escape,
validator=is_bool)
cf.register_option('latex.longtable', False, pc_latex_longtable,
validator=is_bool)
cf.register_option('latex.multicolumn', True, pc_latex_multicolumn,
validator=is_bool)
    cf.register_option('latex.multicolumn_format', 'l',
                       pc_latex_multicolumn_format, validator=is_text)
cf.register_option('latex.multirow', False, pc_latex_multirow,
validator=is_bool)
cf.register_option('html.table_schema', False, pc_table_schema_doc,
validator=is_bool, cb=table_schema_cb)
cf.register_option('html.border', 1, pc_html_border_doc,
validator=is_int)
cf.register_option('html.use_mathjax', True, pc_html_use_mathjax_doc,
validator=is_bool)
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
"""
with cf.config_prefix('mode'):
cf.register_option('sim_interactive', False, tc_sim_interactive_doc)
use_inf_as_null_doc = """
: boolean
    use_inf_as_null has been deprecated and will be removed in a future
version. Use `use_inf_as_na` instead.
"""
use_inf_as_na_doc = """
: boolean
True means treat None, NaN, INF, -INF as NA (old way),
False means None and NaN are null, but INF, -INF are not NA
(new way).
"""
# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
def use_inf_as_na_cb(key):
from pandas.core.dtypes.missing import _use_inf_as_na
_use_inf_as_na(key)
with cf.config_prefix('mode'):
cf.register_option('use_inf_as_na', False, use_inf_as_na_doc,
cb=use_inf_as_na_cb)
cf.register_option('use_inf_as_null', False, use_inf_as_null_doc,
cb=use_inf_as_na_cb)
cf.deprecate_option('mode.use_inf_as_null', msg=use_inf_as_null_doc,
rkey='mode.use_inf_as_na')
# user warnings
chained_assignment = """
: string
    Raise an exception, warn, or take no action if trying to use chained
    assignment. The default is warn.
"""
with cf.config_prefix('mode'):
cf.register_option('chained_assignment', 'warn', chained_assignment,
validator=is_one_of_factory([None, 'warn', 'raise']))
# Set up the io.excel specific configuration.
writer_engine_doc = """
: string
The default Excel writer engine for '{ext}' files. Available options:
auto, {others}.
"""
_xls_options = ['xlwt']
_xlsm_options = ['openpyxl']
_xlsx_options = ['openpyxl', 'xlsxwriter']
with cf.config_prefix("io.excel.xls"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xls',
others=', '.join(_xls_options)),
validator=str)
with cf.config_prefix("io.excel.xlsm"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsm',
others=', '.join(_xlsm_options)),
validator=str)
with cf.config_prefix("io.excel.xlsx"):
cf.register_option("writer", "auto",
writer_engine_doc.format(
ext='xlsx',
others=', '.join(_xlsx_options)),
validator=str)
# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
The default parquet reader/writer engine. Available options:
'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""
with cf.config_prefix('io.parquet'):
cf.register_option(
'engine', 'auto', parquet_engine_doc,
validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet']))
# --------
# Plotting
# ---------
plotting_backend_doc = """
: str
The plotting backend to use. The default value is "matplotlib", the
backend provided with pandas. Other backends can be specified by
    providing the name of the module that implements the backend.
"""
def register_plotting_backend_cb(key):
backend_str = cf.get_option(key)
if backend_str == 'matplotlib':
try:
import pandas.plotting._matplotlib # noqa
except ImportError:
raise ImportError('matplotlib is required for plotting when the '
'default backend "matplotlib" is selected.')
else:
return
try:
importlib.import_module(backend_str)
except ImportError:
raise ValueError('"{}" does not seem to be an installed module. '
'A pandas plotting backend must be a module that '
'can be imported'.format(backend_str))
with cf.config_prefix('plotting'):
cf.register_option('backend', defval='matplotlib',
doc=plotting_backend_doc,
validator=str,
cb=register_plotting_backend_cb)
register_converter_doc = """
: bool
Whether to register converters with matplotlib's units registry for
dates, times, datetimes, and Periods. Toggling to False will remove
the converters, restoring any converters that pandas overwrote.
"""
def register_converter_cb(key):
from pandas.plotting import register_matplotlib_converters
from pandas.plotting import deregister_matplotlib_converters
if cf.get_option(key):
register_matplotlib_converters()
else:
deregister_matplotlib_converters()
with cf.config_prefix("plotting.matplotlib"):
cf.register_option("register_converters", True, register_converter_doc,
validator=bool, cb=register_converter_cb)
|
bsd-3-clause
|
lidalei/DataMining
|
challenge.py
|
1
|
2158
|
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier
from sklearn.svm import SVC
from sklearn.tree import ExtraTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.grid_search import GridSearchCV
from openml import tasks, runs
import xmltodict
import numpy as np
def challenge():
## use dev openml to run
# Download task, run learner, publish results
task = tasks.get_task(14951)
## clf = BaggingClassifier(SVC(), n_estimators = 128)
'''
clf = RandomForestClassifier(n_estimators = 128, class_weight = 'balanced_subsample')
'''
'''
clf = BaggingClassifier(ExtraTreeClassifier(), n_estimators = 20)
'''
'''
param_grid = {'max_depth': np.linspace(1, 15, num = 15, dtype = np.int64),
'class_weight': ['balanced', 'balanced_subsample', None],
'min_samples_split': np.linspace(1, 15, num = 15, dtype = np.int64),
'criterion': ['gini', 'entropy']
}
base_clf = RandomForestClassifier(n_estimators = 20)
clf = GridSearchCV(base_clf, param_grid = param_grid, scoring = 'roc_auc',
cv = 10, pre_dispatch = '2*n_jobs', n_jobs = 4)
'''
'''
## grid search - gamma and C, grid_den = 20, time needed = 13.36s
grid_den = 1
param_grid = {#'C': np.logspace(-5, 5, num = grid_den, base = 2.0),
'gamma': np.logspace(-5, 5, num = grid_den, base = 2.0)
}
clf = GridSearchCV(SVC(probability = True), param_grid = param_grid, scoring = 'roc_auc',
cv = 10, pre_dispatch = '2*n_jobs', n_jobs = 4)
'''
clf = KNeighborsClassifier(n_neighbors = 5, algorithm = 'brute', metric = 'cosine')
run = runs.run_task(task, clf)
return_code, response = run.publish()
# get the run id for reference
    if return_code == 200:
        response_dict = xmltodict.parse(response)
        run_id = response_dict['oml:upload_run']['oml:run_id']
        print("Uploaded run with id %s. Check it at www.openml.org/r/%s"
              % (run_id, run_id))
if __name__ == '__main__':
challenge()
|
mit
|
sytjyjj/Group-Homeproject1
|
setup.py
|
3
|
1355
|
#!/usr/bin/env python
from setuptools import setup
from fredapi.version import version as __version__
requires = ['pandas']
# README = open('README.rst').read()
# CHANGELOG = open('docs/changelog.rst').read()
LONG_DESCRIPTION = open('DESCRIPTION.rst').read()
setup(
name="fredapi",
version=__version__,
url='https://github.com/mortada/fredapi',
author='Mortada Mehyar',
# author_email='',
description="Python API for Federal Reserve Economic Data (FRED) from St. Louis Fed",
long_description=LONG_DESCRIPTION,
test_suite='fredapi.tests.test_fred',
packages=['fredapi'],
platforms=["Any"],
install_requires=requires,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
apache-2.0
|
ivoflipse/devide
|
module_kits/matplotlib_kit/__init__.py
|
7
|
2895
|
# $Id: __init__.py 1945 2006-03-05 01:06:37Z cpbotha $
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""matplotlib_kit package driver file.
Inserts the following modules in sys.modules: matplotlib, pylab.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import os
import re
import sys
import types
# you have to define this
VERSION = ''
def init(theModuleManager, pre_import=True):
if hasattr(sys, 'frozen') and sys.frozen:
# matplotlib supports py2exe by checking for matplotlibdata in the appdir
# but this is only done on windows (and therefore works for our windows
# installer builds). On non-windows, we have to stick it in the env
# to make sure that MPL finds its datadir (only if we're frozen)
mpldir = os.path.join(theModuleManager.get_appdir(), 'matplotlibdata')
os.environ['MATPLOTLIBDATA'] = mpldir
# import the main module itself
# this doesn't import numerix yet...
global matplotlib
import matplotlib
    # use WX + Agg backend (slower, but nicer than WX)
matplotlib.use('WXAgg')
# interactive mode: user can use pylab commands from any introspection
# interface, changes will be made immediately and matplotlib cooperates
# nicely with main WX event loop
matplotlib.interactive(True)
# with matplotlib 1.0.1 we can't do this anymore.
# makes sure we use the numpy backend
#from matplotlib import rcParams
#rcParams['numerix'] = 'numpy'
theModuleManager.setProgress(25, 'Initialising matplotlib_kit: config')
# @PATCH:
# this is for the combination numpy 1.0.4 and matplotlib 0.91.2
# matplotlib/numerix/ma/__init__.py:
# . normal installation fails on "from numpy.ma import *", so "from
# numpy.core.ma import *" is done, thus bringing in e.g. getmask
# . pyinstaller binaries for some or other reason succeed on
# "from numpy.ma import *" (no exception raised), therefore do
# not do "from numpy.core.ma import *", and therefore things like
# getmask are not imported.
# solution:
# we make sure that "from numpy.ma import *" actually brings in
# numpy.core.ma by importing that and associating the module
# binding to the global numpy.ma.
#if hasattr(sys, 'frozen') and sys.frozen:
# import numpy.core.ma
# sys.modules['numpy.ma'] = sys.modules['numpy.core.ma']
# import the pylab interface, make sure it's available from this namespace
global pylab
import pylab
theModuleManager.setProgress(90, 'Initialising matplotlib_kit: pylab')
# build up VERSION
global VERSION
VERSION = '%s' % (matplotlib.__version__,)
theModuleManager.setProgress(100, 'Initialising matplotlib_kit: complete')
|
bsd-3-clause
|
r24mille/ldc_analysis
|
zonal_demand/run.py
|
1
|
1883
|
#!/usr/bin/env python
import os
import matplotlib
import numpy
import matplotlib.pyplot as plt
from zonal_demand.models import ZonalDemand
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ldc_analysis.settings")
# Get timeseries of TransformerLoad associated with a Transformer
demands = ZonalDemand.objects.using('zonal').all()
# Find the length of the results to know first and last index, then get the number of days
num_loads = len(demands)
start_date = demands[0].demand_datetime_dst
end_date = demands[num_loads - 1].demand_datetime_dst
num_days = (end_date - start_date).days
# Create an n x 24 matrix to hold each hour's values and zeros for hours without value
load_heatmap = numpy.zeros(((num_days + 1), 24))
# Loop over all TransformerLoad rows returned and populate them into the proper array slot
for zonalDemand in demands:
d = (zonalDemand.demand_datetime_dst - start_date).days
load_heatmap[d][zonalDemand.hour - 1] = zonalDemand.total_ontario
# Plot the timeseries matrix as a heatmap
start_datenum = matplotlib.dates.date2num(start_date)
end_datenum = matplotlib.dates.date2num(end_date)
fig = plt.figure()
ax = plt.subplot()
# plt.subplots_adjust(left=0.2, bottom=None, right=1, top=None, wspace=None, hspace=None)
im = ax.imshow(load_heatmap, interpolation='none', aspect='auto', extent=(0, 24, start_datenum, end_datenum), origin='lower', vmin=9000, vmax=27000)
ax.yaxis_date()
ax.set_title("Ontario Demand")
ax.set_xlabel("Hour of Day")
ax.set_ylabel("Day of Year")
ax.xaxis.set_ticks(range(0, 24, 2))
cb = fig.colorbar(im)
cb.set_label("Kilowatt-hours (kWh)")
# plt.savefig("./figures/ontario.png", dpi=100)
# plt.close()
plt.show()
|
apache-2.0
|
Djabbz/scikit-learn
|
examples/model_selection/grid_search_digits.py
|
227
|
2665
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
|
bsd-3-clause
|
kaiserroll14/301finalproject
|
main/pandas/stats/math.py
|
25
|
3253
|
# pylint: disable-msg=E1103
# pylint: disable-msg=W0212
from __future__ import division
from pandas.compat import range
import numpy as np
import numpy.linalg as linalg
def rank(X, cond=1.0e-12):
"""
    Return the numerical rank of a matrix X: the number of singular values
    larger than `cond` times the largest singular value.
"""
X = np.asarray(X)
if len(X.shape) == 2:
import scipy.linalg as SL
D = SL.svdvals(X)
result = np.add.reduce(np.greater(D / D.max(), cond))
return int(result.astype(np.int32))
else:
return int(not np.alltrue(np.equal(X, 0.)))
def solve(a, b):
"""Returns the solution of A X = B."""
try:
return linalg.solve(a, b)
except linalg.LinAlgError:
return np.dot(linalg.pinv(a), b)
def inv(a):
"""Returns the inverse of A."""
try:
return np.linalg.inv(a)
except linalg.LinAlgError:
return np.linalg.pinv(a)
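# Editorial note (not part of the original module): both solve() and inv() above
# fall back to the Moore-Penrose pseudo-inverse when the matrix is singular, so
# callers get a least-squares answer instead of a LinAlgError; for example,
# solve(np.zeros((2, 2)), np.ones(2)) returns array([0., 0.]).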
def is_psd(m):
eigvals = linalg.eigvals(m)
return np.isreal(eigvals).all() and (eigvals >= 0).all()
def newey_west(m, max_lags, nobs, df, nw_overlap=False):
"""
Compute Newey-West adjusted covariance matrix, taking into account
specified number of leads / lags
Parameters
----------
m : (N x K)
max_lags : int
nobs : int
Number of observations in model
df : int
Degrees of freedom in explanatory variables
nw_overlap : boolean, default False
Assume data is overlapping
Returns
-------
ndarray (K x K)
Reference
---------
Newey, W. K. & West, K. D. (1987) A Simple, Positive
Semi-definite, Heteroskedasticity and Autocorrelation Consistent
Covariance Matrix, Econometrica, vol. 55(3), 703-708
"""
Xeps = np.dot(m.T, m)
for lag in range(1, max_lags + 1):
auto_cov = np.dot(m[:-lag].T, m[lag:])
weight = lag / (max_lags + 1)
if nw_overlap:
weight = 0
bb = auto_cov + auto_cov.T
dd = (1 - weight) * bb
Xeps += dd
Xeps *= nobs / (nobs - df)
if nw_overlap and not is_psd(Xeps):
new_max_lags = int(np.ceil(max_lags * 1.5))
# print('nw_overlap is True and newey_west generated a non positive '
# 'semidefinite matrix, so using newey_west with max_lags of %d.'
# % new_max_lags)
return newey_west(m, new_max_lags, nobs, df)
return Xeps
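# Illustrative usage sketch (editorial addition, not part of the original module).
# newey_west expects the stacked moment contributions m, typically the regressors
# multiplied element-wise by the residuals; all data below are invented.
def _newey_west_example():
    rng = np.random.RandomState(0)
    x = rng.standard_normal((100, 3))       # 100 observations, 3 regressors
    resid = rng.standard_normal((100, 1))   # regression residuals
    m = x * resid                           # moment contributions, shape (100, 3)
    return newey_west(m, max_lags=4, nobs=100, df=3)  # 3 x 3 HAC covariance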
def calc_F(R, r, beta, var_beta, nobs, df):
"""
Computes the standard F-test statistic for linear restriction
hypothesis testing
Parameters
----------
R: ndarray (N x N)
Restriction matrix
r: ndarray (N x 1)
Restriction vector
beta: ndarray (N x 1)
Estimated model coefficients
var_beta: ndarray (N x N)
Variance covariance matrix of regressors
nobs: int
Number of observations in model
df: int
Model degrees of freedom
Returns
-------
F value, (q, df_resid), p value
"""
from scipy.stats import f
hyp = np.dot(R, beta.reshape(len(beta), 1)) - r
RSR = np.dot(R, np.dot(var_beta, R.T))
q = len(r)
F = np.dot(hyp.T, np.dot(inv(RSR), hyp)).squeeze() / q
p_value = 1 - f.cdf(F, q, nobs - df)
return F, (q, nobs - df), p_value
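# Illustrative usage sketch (editorial addition, not part of the original module).
# calc_F tests the linear restriction R beta = r; here R picks out the first of
# three coefficients and tests whether it equals zero.  All numbers are invented.
def _calc_F_example():
    beta = np.array([0.1, 2.0, -1.5])       # hypothetical estimated coefficients
    var_beta = np.eye(3) * 0.04             # hypothetical coefficient covariance
    R = np.array([[1.0, 0.0, 0.0]])         # restriction matrix: tests beta[0] = 0
    r = np.zeros((1, 1))                    # restriction value
    return calc_F(R, r, beta, var_beta, nobs=100, df=3)  # (F, (q, df_resid), p)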
|
gpl-3.0
|
ZENGXH/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
sklearn/tree/tests/test_export.py
|
33
|
9901
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in, assert_equal, assert_raises
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
y_degraded = [1, 1, 1, 1, 1, 1]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
contents1 = export_graphviz(clf, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
contents1 = export_graphviz(clf, feature_names=["feature0", "feature1"],
out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
contents1 = export_graphviz(clf, class_names=["yes", "no"], out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
contents1 = export_graphviz(clf, filled=True, impurity=False,
proportion=True, special_characters=True,
rounded=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
contents1 = export_graphviz(clf, max_depth=0,
class_names=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
contents1 = export_graphviz(clf, max_depth=0, filled=True,
out_file=None, node_ids=True)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=2,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
contents1 = export_graphviz(clf, filled=True,
impurity=False, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
'[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n' \
'[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
'[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 3 ;\n' \
'4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 4 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=2,
criterion="mse",
random_state=2)
clf.fit(X, y)
contents1 = export_graphviz(clf, filled=True, leaves_parallel=True,
out_file=None, rotate=True, rounded=True)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e5813980"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
# Test classifier with degraded learning set
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X, y_degraded)
contents1 = export_graphviz(clf, filled=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", ' \
'fillcolor="#e5813900"] ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
|
bsd-3-clause
|
NunoEdgarGub1/scikit-learn
|
sklearn/utils/tests/test_fixes.py
|
281
|
1829
|
# Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
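# Editorial note (not part of the original tests): the point of the checks above
# is numerical stability -- in a naive 1. / (1. + np.exp(-x)) the intermediate
# np.exp(1000.) overflows to inf and triggers a warning, while expit(-1000.)
# cleanly returns 0.0.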
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
|
bsd-3-clause
|
kiyoto/statsmodels
|
statsmodels/tools/tests/test_pca.py
|
25
|
13934
|
from __future__ import print_function, division
from unittest import TestCase
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_raises
from numpy.testing.decorators import skipif
import pandas as pd
try:
import matplotlib.pyplot as plt
missing_matplotlib = False
except ImportError:
missing_matplotlib = True
from statsmodels.tools.pca import PCA
from statsmodels.tools.tests.results.datamlw import data, princomp1, princomp2
from statsmodels.compat.numpy import nanmean
DECIMAL_5 = .00001
class TestPCA(TestCase):
@classmethod
def setUpClass(cls):
rs = np.random.RandomState()
rs.seed(1234)
k = 3
n = 100
t = 200
lam = 2
norm_rng = rs.standard_normal
e = norm_rng((t, n))
f = norm_rng((t, k))
b = rs.standard_gamma(lam, size=(k, n)) / lam
cls.x = f.dot(b) + e
cls.x_copy = cls.x + 0.0
cls.rs = rs
k = 3
n = 300
t = 200
lam = 2
norm_rng = rs.standard_normal
e = norm_rng((t, n))
f = norm_rng((t, k))
b = rs.standard_gamma(lam, size=(k, n)) / lam
cls.x_wide = f.dot(b) + e
@skipif(missing_matplotlib)
def test_smoke_plot_and_repr(self):
pc = PCA(self.x)
fig = pc.plot_scree()
fig = pc.plot_scree(ncomp=10)
fig = pc.plot_scree(log_scale=False)
fig = pc.plot_scree(cumulative=True)
fig = pc.plot_rsquare()
fig = pc.plot_rsquare(ncomp=5)
# Additional smoke test
pc.__repr__()
pc = PCA(self.x, standardize=False)
pc.__repr__()
pc = PCA(self.x, standardize=False, demean=False)
pc.__repr__()
# Check data for no changes
assert_equal(self.x, pc.data)
def test_eig_svd_equiv(self):
"""
Test leading components since the tail end can differ
"""
pc_eig = PCA(self.x)
pc_svd = PCA(self.x, method='svd')
assert_allclose(pc_eig.projection, pc_svd.projection)
assert_allclose(np.abs(pc_eig.factors[:, :2]),
np.abs(pc_svd.factors[:, :2]))
assert_allclose(np.abs(pc_eig.coeff[:2, :]),
np.abs(pc_svd.coeff[:2, :]))
assert_allclose(pc_eig.eigenvals,
pc_svd.eigenvals)
assert_allclose(np.abs(pc_eig.eigenvecs[:, :2]),
np.abs(pc_svd.eigenvecs[:, :2]))
pc_svd = PCA(self.x, method='svd', ncomp=2)
pc_nipals = PCA(self.x, method='nipals', ncomp=2)
assert_allclose(np.abs(pc_nipals.factors),
np.abs(pc_svd.factors),
atol=DECIMAL_5)
assert_allclose(np.abs(pc_nipals.coeff),
np.abs(pc_svd.coeff),
atol=DECIMAL_5)
assert_allclose(pc_nipals.eigenvals,
pc_svd.eigenvals,
atol=DECIMAL_5)
assert_allclose(np.abs(pc_nipals.eigenvecs),
np.abs(pc_svd.eigenvecs),
atol=DECIMAL_5)
# Check data for no changes
assert_equal(self.x, pc_svd.data)
# Check data for no changes
assert_equal(self.x, pc_eig.data)
# Check data for no changes
assert_equal(self.x, pc_nipals.data)
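    # Editorial note (not part of the original tests): the np.abs(...) comparisons
    # above are needed because principal components are only identified up to a
    # sign flip, so eigendecomposition-, SVD- and NIPALS-based factors can differ
    # by a factor of -1 on any component.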
def test_options(self):
pc = PCA(self.x)
pc_no_norm = PCA(self.x, normalize=False)
assert_allclose(pc.factors.dot(pc.coeff),
pc_no_norm.factors.dot(pc_no_norm.coeff))
princomp = pc.factors
assert_allclose(princomp.T.dot(princomp), np.eye(100), atol=1e-5)
weights = pc_no_norm.coeff
assert_allclose(weights.T.dot(weights), np.eye(100), atol=1e-5)
pc_10 = PCA(self.x, ncomp=10)
assert_allclose(pc.factors[:, :10], pc_10.factors)
assert_allclose(pc.coeff[:10, :], pc_10.coeff)
assert_allclose(pc.rsquare[:(10 + 1)], pc_10.rsquare)
assert_allclose(pc.eigenvals[:10], pc_10.eigenvals)
assert_allclose(pc.eigenvecs[:, :10], pc_10.eigenvecs)
pc = PCA(self.x, standardize=False, normalize=False)
mu = self.x.mean(0)
xdm = self.x - mu
xpx = xdm.T.dot(xdm)
val, vec = np.linalg.eigh(xpx)
ind = np.argsort(val)
ind = ind[::-1]
val = val[ind]
vec = vec[:, ind]
assert_allclose(xdm, pc.transformed_data)
assert_allclose(val, pc.eigenvals)
assert_allclose(np.abs(vec), np.abs(pc.eigenvecs))
assert_allclose(np.abs(pc.factors), np.abs(xdm.dot(vec)))
assert_allclose(pc.projection, xdm + mu)
pc = PCA(self.x, standardize=False, demean=False, normalize=False)
x = self.x
xpx = x.T.dot(x)
val, vec = np.linalg.eigh(xpx)
ind = np.argsort(val)
ind = ind[::-1]
val = val[ind]
vec = vec[:, ind]
assert_allclose(x, pc.transformed_data)
assert_allclose(val, pc.eigenvals)
assert_allclose(np.abs(vec), np.abs(pc.eigenvecs))
assert_allclose(np.abs(pc.factors), np.abs(x.dot(vec)))
def test_against_reference(self):
"""
Test against MATLAB, which by default demeans but does not standardize
"""
x = data.xo / 1000.0
pc = PCA(x, normalize=False, standardize=False)
ref = princomp1
assert_allclose(np.abs(pc.factors), np.abs(ref.factors))
assert_allclose(pc.factors.dot(pc.coeff) + x.mean(0), x)
assert_allclose(np.abs(pc.coeff), np.abs(ref.coef.T))
assert_allclose(pc.factors.dot(pc.coeff),
ref.factors.dot(ref.coef.T))
pc = PCA(x[:20], normalize=False, standardize=False)
mu = x[:20].mean(0)
ref = princomp2
assert_allclose(np.abs(pc.factors), np.abs(ref.factors))
assert_allclose(pc.factors.dot(pc.coeff) + mu, x[:20])
assert_allclose(np.abs(pc.coeff), np.abs(ref.coef.T))
assert_allclose(pc.factors.dot(pc.coeff),
ref.factors.dot(ref.coef.T))
def test_warnings_and_errors(self):
with warnings.catch_warnings(record=True) as w:
pc = PCA(self.x, ncomp=300)
assert_equal(len(w), 1)
with warnings.catch_warnings(record=True) as w:
rs = self.rs
x = rs.standard_normal((200, 1)) * np.ones(200)
pc = PCA(x, method='eig')
assert_equal(len(w), 1)
assert_raises(ValueError, PCA, self.x, method='unknown')
assert_raises(ValueError, PCA, self.x, missing='unknown')
assert_raises(ValueError, PCA, self.x, tol=2.0)
assert_raises(ValueError, PCA, np.nan * np.ones((200,100)), tol=2.0)
@skipif(missing_matplotlib)
def test_pandas(self):
pc = PCA(pd.DataFrame(self.x))
pc1 = PCA(self.x)
assert_equal(pc.factors.values, pc1.factors)
fig = pc.plot_scree()
fig = pc.plot_scree(ncomp=10)
fig = pc.plot_scree(log_scale=False)
fig = pc.plot_rsquare()
fig = pc.plot_rsquare(ncomp=5)
proj = pc.project(2)
PCA(pd.DataFrame(self.x), ncomp=4, gls=True)
PCA(pd.DataFrame(self.x), ncomp=4, standardize=False)
def test_gls_and_weights(self):
assert_raises(ValueError, PCA, self.x, gls=True)
assert_raises(ValueError, PCA, self.x, weights=np.array([1.0, 1.0]))
# Pre-standardize to make comparison simple
x = (self.x - self.x.mean(0))
x = x / (x ** 2.0).mean(0)
pc_gls = PCA(x, ncomp=1, standardize=False, demean=False, gls=True)
pc = PCA(x, ncomp=1, standardize=False, demean=False)
errors = x - pc.projection
var = (errors ** 2.0).mean(0)
weights = 1.0 / var
weights = weights / np.sqrt((weights ** 2.0).mean())
assert_allclose(weights, pc_gls.weights)
assert_equal(x, pc_gls.data)
assert_equal(x, pc.data)
pc_weights = PCA(x, ncomp=1, standardize=False, demean=False, weights=weights)
assert_allclose(weights, pc_weights.weights)
assert_allclose(np.abs(pc_weights.factors), np.abs(pc_gls.factors))
def test_wide(self):
pc = PCA(self.x_wide)
assert_equal(pc.factors.shape[1], self.x_wide.shape[0])
assert_equal(pc.eigenvecs.shape[1], min(np.array(self.x_wide.shape)))
pc = PCA(pd.DataFrame(self.x_wide))
assert_equal(pc.factors.shape[1], self.x_wide.shape[0])
assert_equal(pc.eigenvecs.shape[1], min(np.array(self.x_wide.shape)))
def test_projection(self):
pc = PCA(self.x, ncomp=5)
mu = self.x.mean(0)
demean_x = self.x - mu
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, ncomp=5)
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, demean=False, ncomp=5)
coef = np.linalg.pinv(pc.factors).dot(self.x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct)
pc = PCA(self.x, ncomp=5, gls=True)
mu = self.x.mean(0)
demean_x = self.x - mu
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, ncomp=5)
coef = np.linalg.pinv(pc.factors).dot(demean_x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct + mu)
pc = PCA(self.x, standardize=False, demean=False, ncomp=5, gls=True)
coef = np.linalg.pinv(pc.factors).dot(self.x)
direct = pc.factors.dot(coef)
assert_allclose(pc.projection, direct)
# Test error for too many factors
project = pc.project
assert_raises(ValueError, project, 6)
def test_replace_missing(self):
x = self.x.copy()
x[::5, ::7] = np.nan
pc = PCA(x, missing='drop-row')
x_dropped_row = x[np.logical_not(np.any(np.isnan(x), 1))]
pc_dropped = PCA(x_dropped_row)
assert_equal(pc.projection, pc_dropped.projection)
assert_equal(x, pc.data)
pc = PCA(x, missing='drop-col')
x_dropped_col = x[:, np.logical_not(np.any(np.isnan(x), 0))]
pc_dropped = PCA(x_dropped_col)
assert_equal(pc.projection, pc_dropped.projection)
assert_equal(x, pc.data)
pc = PCA(x, missing='drop-min')
if x_dropped_row.size > x_dropped_col.size:
x_dropped_min = x_dropped_row
else:
x_dropped_min = x_dropped_col
pc_dropped = PCA(x_dropped_min)
assert_equal(pc.projection, pc_dropped.projection)
assert_equal(x, pc.data)
pc = PCA(x, ncomp=3, missing='fill-em')
missing = np.isnan(x)
mu = nanmean(x, axis=0)
errors = x - mu
sigma = np.sqrt(nanmean(errors ** 2, axis=0))
x_std = errors / sigma
x_std[missing] = 0.0
last = x_std[missing]
delta = 1.0
count = 0
while delta > 5e-8:
pc_temp = PCA(x_std, ncomp=3, standardize=False, demean=False)
x_std[missing] = pc_temp.projection[missing]
current = x_std[missing]
diff = current - last
delta = np.sqrt(np.sum(diff ** 2)) / np.sqrt(np.sum(current ** 2))
last = current
count += 1
x = self.x + 0.0
projection = pc_temp.projection * sigma + mu
x[missing] = projection[missing]
assert_allclose(pc._adjusted_data, x)
# Check data for no changes
assert_equal(self.x, self.x_copy)
x = self.x
pc = PCA(x)
pc_dropped = PCA(x, missing='drop-row')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
pc_dropped = PCA(x, missing='drop-col')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
pc_dropped = PCA(x, missing='drop-min')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
pc = PCA(x, ncomp=3)
pc_dropped = PCA(x, ncomp=3, missing='fill-em')
assert_allclose(pc.projection, pc_dropped.projection, atol=DECIMAL_5)
# Test too many missing for missing='fill-em'
x = self.x.copy()
x[:, :] = np.nan
assert_raises(ValueError, PCA, x, missing='drop-row')
assert_raises(ValueError, PCA, x, missing='drop-col')
assert_raises(ValueError, PCA, x, missing='drop-min')
assert_raises(ValueError, PCA, x, missing='fill-em')
def test_rsquare(self):
x = self.x + 0.0
mu = x.mean(0)
x_demean = x - mu
std = np.std(x, 0)
x_std = x_demean / std
pc = PCA(self.x)
nvar = x.shape[1]
rsquare = np.zeros(nvar + 1)
tss = np.sum(x_std ** 2)
for i in range(nvar + 1):
errors = x_std - pc.project(i, transform=False, unweight=False)
rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
assert_allclose(rsquare, pc.rsquare)
pc = PCA(self.x, standardize=False)
tss = np.sum(x_demean ** 2)
for i in range(nvar + 1):
errors = x_demean - pc.project(i, transform=False, unweight=False)
rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
assert_allclose(rsquare, pc.rsquare)
pc = PCA(self.x, standardize=False, demean=False)
tss = np.sum(x ** 2)
for i in range(nvar + 1):
errors = x - pc.project(i, transform=False, unweight=False)
rsquare[i] = 1.0 - np.sum(errors ** 2) / tss
assert_allclose(rsquare, pc.rsquare)
|
bsd-3-clause
|
DarkEnergyScienceCollaboration/SLTimer
|
python/desc/sltimer/worker.py
|
2
|
23802
|
# ======================================================================
# License info here?
# ======================================================================
from __future__ import absolute_import
import os
import sys
import urllib
import subprocess
import pycs
import numpy as np
from .reading import *
import matplotlib
# Force matplotlib to not use any Xwindows backend.
# The backend must be selected before pyplot is imported for this to take effect.
matplotlib.use('Agg')
from matplotlib import pyplot as plt
__all__ = ['SLTimer', 'spl']
class SLTimer(object):
'''
Worker class for ingesting strongly lensed image light curves, and
measuring the time delays between them.
'''
def __init__(self):
self.agn = None
self.microlensing = None
self.time_delays = None
self.datafile = None
self.lcs = None
self.ml_knotstep = 350
self.knotstep = 20
self.Hbar = 70.
self.sigmaH = 7
self.phibar = None
self.sigmaPhi = None
self.Q=0
return
def download(self, url, format='rdb', and_read=False):
'''
Downloads the datafile from a url.
Parameters
----------
url : string
Web address of datafile.
format : string
Data format, 'rdb' or 'tdc2'
and_read : boolean
Read in data after downloading file?
Notes
-----
Don't forget to set `and_read=True` if you want to use the data!
'''
self.datafile = url.split('/')[-1]
if not os.path.isfile(self.datafile):
urllib.urlretrieve(url, self.datafile)
print 'Downloaded datafile:', url
if and_read:
self.read_in(format=format)
return
def read_in(self, datafile='self', format=None):
'''
Reads in light curve data from a file.
'''
if datafile == 'self':
pass
else:
self.datafile = datafile
if format == 'rdb':
self.lcs = read_in_rdb_data(self.datafile)
elif format == 'tdc2':
self.lcs = read_in_tdc2_data(self.datafile)
Q_FP_ERR = get_tdc2_header(self.datafile)
self.Q = Q_FP_ERR['Q']
self.phibar = Q_FP_ERR['FP']
self.sigmaPhi = Q_FP_ERR['FPErr']
else:
raise ValueError('Unrecognized or null format '+str(format))
self.Nim = len(self.lcs)
return
    def prior(self, t):
Hbar=self.Hbar
sigmaH=self.sigmaH
phibar=self.phibar
sigmaPhi=self.sigmaPhi
Q=self.Q/(3.0*1E5)
# print(Q*phibar/Hbar)
f=1./(2*sigmaH*sigmaPhi*np.pi*Q)
s=-(Hbar)**2/(sigmaH**2)+(-phibar**2)/(sigmaPhi**2)
t=((Hbar/(sigmaH**2)+(phibar*t)/(Q*sigmaPhi**2))**2)/(1./(sigmaH**2)+(t**2)/((sigmaPhi**2)*(Q**2)))
normalize=np.max(t)+s
m=np.exp(s+t-normalize)
ft=(Hbar/sigmaH**2+(phibar*t)/(Q*(sigmaPhi**2)))/(1./sigmaH**2+t**2/((sigmaPhi**2)*(Q**2)))
fif=np.sqrt(np.pi/(1./sigmaH**2+t**2/((sigmaPhi**2)*(Q**2))))
return f*m*ft*fif
def optimize_spline_model(self):
'''
Optimizes a spline model for the intrinsic variability.
'''
return spl(self.lcs, knotstep=self.knotstep)
#========================================================== Plotting light curves
def display_light_curves(self, filename=None, jdrange=(None), title=None,
given_curve=None):
'''
Displays the lightcurves in a single panel plot.
'''
if given_curve is not None:
if len(given_curve) == 2:
lcs, agn = given_curve
else:
lcs = given_curve
agn = None
else:
lcs = self.lcs
agn = None
pycs.gen.mrg.colourise(lcs)
# Replace the following with an optional input list of shifts
#lcs[1].shifttime(-5.0)
#lcs[2].shifttime(-20.0)
#lcs[3].shifttime(-70.0)
pycs.gen.lc.display(lcs, [agn], figsize=(20, 7),
jdrange=jdrange, title=title, nicefont=True)
# lcs = pycs.gen.util
# for l in lcs:
# l.resetshifts()
if filename is not None:
pycs.gen.lc.display(lcs, [agn], figsize=(20, 7),
jdrange=jdrange, title=title, nicefont=True,
filename=filename)
return
def select_bands(self, bands):
'''
        Select the bands you want to keep.
        Notes
        -----
        .. warning:: this function will change the light curves stored in SLTimer
'''
self.lcs = select_bands(self.lcs, bands)
def reset_lc(self):
for l in self.lcs:
l.resetshifts()
l.resetml()
return
def whiten(self):
'''
Whitens a set of multi-filter light curves to a single fictitious band.
'''
self.lcs = whiten(self.lcs)
return
#===================================================== Microlensing
def add_polynomial_microlensing(self):
'''
Adds polynomial microlensing to each lightcurve.
'''
pycs.gen.polyml.addtolc(self.lcs[0], nparams=3,
autoseasonsgap=600.0)
pycs.gen.polyml.addtolc(self.lcs[1], nparams=3,
autoseasonsgap=600.0)
if self.Nim == 4:
pycs.gen.polyml.addtolc(self.lcs[2], nparams=3,
autoseasonsgap=600.0)
pycs.gen.polyml.addtolc(self.lcs[3], nparams=3,
autoseasonsgap=600.0)
return
def add_spline_microlensing(self):
'''
Adds spline microlensing to each light curve.
'''
spline_microlensing(self.lcs, self.ml_knotstep)
return
#========================================= Primary workhorse method
def estimate_time_delays(self, method='pycs', microlensing='spline', agn='spline', error=None, quietly=False):
'''
Measures time delays between images, by modeling all the light
curves.
        Parameters
        ----------
        method : string
            Modeling package to use (currently only 'pycs' is available).
        microlensing : string
            Choice of microlensing model to use ('polynomial' or 'spline').
        agn : string
            Choice of intrinsic AGN variability model to use (currently only
            the free-knot 'spline' model is available).
        error : string
            Error estimation option [None, 'complete', 'intrinsic variance'].
        quietly : boolean
            Redirect output to /dev/null?
        Notes
        -----
        Provides both polynomial and spline time delays.
        '''
if method == 'pycs':
# print "You are using the pycs method."
pass
else:
print "The only available method is 'pycs' - exiting."
return
if quietly:
as_requested = {'stdout':None, 'stderr':None}
else:
as_requested = {'stdout':sys.stdout, 'stderr':sys.stderr}
# Tell the lightcurves that their model is going to include microlensing:
if microlensing == 'polynomial':
with SilentOperation(**as_requested):
self.add_polynomial_microlensing()
elif microlensing == 'spline':
with SilentOperation(**as_requested):
self.add_spline_microlensing()
else:
pass
# Keep a record:
self.microlensing = microlensing
# Optimize the model for both microlensing and intrinsic variability:
if agn == 'spline':
with SilentOperation(**as_requested):
self.agn = self.optimize_spline_model()
else:
print "Error: only free-knot spline models are available for AGN variability at present."
return
# Do error analysis, if required:
if error == 'complete':
with SilentOperation(**as_requested):
self.estimate_uncertainties()
elif error == 'intrinsic variance':
with SilentOperation(**as_requested):
self.find_intrinsic_variance()
else:
return
#===================================================== Evaluate the fitting
def compute_chisq(self, delay, batch=False, getlcs=False):
"""
return chisquare of spline fitting given time delay
Parameters
----------
delay : 1D array
array contains time delays for each light curve. The convention is
[dt_AB, dt_AC, dt_AD]
batch : bool
if batch==True, then delay can be a two dimensional array with each
row contains a set of time delay sample.
"""
if batch:
chisquare = []
for item in delay:
chisquare.append(get_chi_squared(
lcs_original=self.lcs,
ml_knotstep=self.ml_knotstep, delay=item,
getlcs=False, knotstep=self.knotstep
))
return chisquare
return get_chi_squared(lcs_original=self.lcs,
ml_knotstep=self.ml_knotstep,
getlcs=getlcs,
delay=delay, knotstep=self.knotstep)
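    # Illustrative usage sketch (editorial addition): assuming `timer` is an
    # SLTimer instance whose light curves have already been read in (a quad, so
    # three delays), the invented values below show the expected call patterns:
    #
    #   chi2_single = timer.compute_chisq([-5.0, -20.0, -70.0])
    #   chi2_batch = timer.compute_chisq([[-5.0, -20.0, -70.0],
    #                                     [-6.0, -21.0, -71.0]], batch=True)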
def generate_random_sample(self, rangeList, nsample):
ndim = len(self.lcs)
#Generate samples
if rangeList is None:
rangeList = [[-100, 100]]*(ndim-1)
d = []
for item in xrange(ndim-1):
d.append(np.random.uniform(rangeList[item][0], rangeList[item][1],
nsample))
sample = np.array(d).T
return sample
def write_out_to(self, result, outName):
file_name = "{0}_delay_chi2_{1}_samples.txt".format(outName,
result.shape[0])
names = ["AB", "AC", "AD"]
header = "Smaples time delay for simple montecarlo and their corresponding \
chisquare. \n"
for index in xrange(result.shape[1]-1):
header += " dt_"+names[index]
header += " chisquare"
np.savetxt(file_name, result, header=header, comments="# ")
return
def plot_likelihood_from_file(self, file_name, chisquare=False, bins=20,
outName="from_file_", corner_plot=True,
add_prior= True):
result = np.loadtxt(file_name)
self.plot_likelihood(result, outName+file_name[-10:],
chisquare=chisquare, bins=bins,
corner_plot=corner_plot, add_prior=add_prior)
return
def plot_likelihood(self, result, outName, plot_contours=True,
plot_density=True, chisquare=False, bins=20,
corner_plot=True, add_prior=True):
import corner
log = True
sample = result[:, :-1]
if not chisquare:
weight = chi2_to_weight(result[:, -1])
title = "likelihood"
else:
weight = result[:, -1]
# weight = result[:, -1] - np.min(result[:, -1])
log = False
title = r"$\chi^2 plot$"
if corner_plot:
fig = corner.corner(sample, bins=bins,
labels=[r'$\Delta t_{AB}(days)$',
r'$\Delta t_{AC}(days)$',
r'$\Delta t_{AD}(days)$'],
weights=weight, plot_contours=plot_contours,
plot_density=plot_density,
max_n_ticks=10,
use_math_text=True
)
else:
if sample.shape[1] != 1:
print("corner=False can only be true when there is only 1D sample")
sample = sample.ravel()
fig = plt.figure()
ax = fig.add_subplot(111)
bins = np.linspace(sample.min(), sample.max(), bins)
wd, b = np.histogram(sample, bins=bins, weights=weight)
counts, b = np.histogram(sample, bins=bins)
bincentres = [(b[i]+b[i+1])/2. for i in range(len(b)-1)]
ax.set_xlabel(r'$\Delta t_{AB}(days)$')
ax.set_ylabel(r'$\chi^2$')
ax.step(bincentres, wd/counts, where='mid', color='k',
linestyle="-")
fig.suptitle(title)
fig.savefig("{0}_likelihood_{1}_samples.png".format(outName,
result.shape[0]))
return
def compute_likelihood_simpleMC(self, nsample=1000, nprocess=5,
rangeList=None, outName="",
save_file=True, samples=None):
'''
Compute the likelihood by Monte Carlo method
'''
from multiprocessing import Pool
from functools import partial
import time
if samples is not None:
sample = samples
nsample = len(sample)
else:
sample = self.generate_random_sample(rangeList=rangeList,
nsample=nsample)
#calculate the chisquare
start = time.time()
p = Pool(processes=nprocess)
chisquare = np.array(p.map(partial(
get_chi_squared,
lcs_original=self.lcs,
ml_knotstep=self.ml_knotstep,
getlcs=False,
knotstep=self.knotstep),
sample))
end = time.time()
print("Multiprocessing used {0} seconds.".format(end-start))
weight = chi2_to_weight(chisquare)
print("min chisquare,", np.min(chisquare))
print("#"*20)
print("weighted time delays (dAB,dAC,dAD)(days) :",
weight.T.dot(sample))
results = np.column_stack((sample, chisquare))
if save_file:
self.write_out_to(results, outName)
self.plot_likelihood(results, outName)
return sample[np.argmin(chisquare)]
def degree_of_freedom(self):
spline = pycs.spl.topopt.opt_rough(self.lcs, nit=1,
knotstep=self.knotstep,
verbose=False)
num = len(spline.t)
spline = pycs.spl.topopt.opt_rough(self.lcs, nit=1,
knotstep=self.ml_knotstep,
verbose=False)
num_ml = len(spline.t)
free_param = num*2+4+len(self.lcs)*(num_ml*2+4)+4
nm_constraint = 0
for l in self.lcs:
nm_constraint += len(l)
print("knotstep for intrinsic fluctuation is: {0}".format(self.knotstep))
print("knotstep for micro lensing is: {0}".format(self.ml_knotstep))
print("number of data points is: {0}".format(nm_constraint))
dof = nm_constraint-free_param
return {"dof" : dof, "# data" : nm_constraint}
def initialize_time_delays(self, method=None, pars=None):
'''
Initializes the curve shifts by specifying 1 or 3 time delays.
'''
if method is None:
dt = {'AB':0.0}
if self.Nim == 4:
dt['AC'] = 0.0
dt['AD'] = 0.0
elif method == 'guess':
dt = pars
assert pars is not None
assert len(dt) == (self.Nim - 1)
assert type(dt) == dict
elif method == 'simpleMC':
bestGuess = self.compute_likelihood_simpleMC(nsample=10,
nprocess=4,
save_file=False)
            dt = {'AB': bestGuess[0]}
            if self.Nim == 4:
                dt['AC'] = bestGuess[1]
                dt['AD'] = bestGuess[2]
else:
raise ValueError("Unrecognized initialization method '"+method+"'")
# Set the shifts of each light curve object in lcs:
# All lenses:
self.lcs[1].shifttime(dt['AB'])
# Quads only:
if self.Nim == 4:
self.lcs[2].shifttime(dt['AC'])
self.lcs[3].shifttime(dt['AD'])
# Report that shifting has occurred, and report time delays:
print "Initialization completed, using method '"+method+"'"
self.report_time_delays()
return
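    # Illustrative usage sketch (editorial addition): with method='guess' the
    # number of delays must match the number of images minus one -- one delay for
    # a double, three for a quad.  The numerical values below are placeholders.
    #
    #   timer.initialize_time_delays(method='guess', pars={'AB': -5.0})
    #   timer.initialize_time_delays(method='guess',
    #                                pars={'AB': -5.0, 'AC': -20.0, 'AD': -70.0})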
#===================================================== Resimulating the Data
def delete_old_files(self):
'''
Deletes the old files from previous error simulations.
'''
subprocess.call('rm -rfv sims_copies sims_mocks', shell=True)
subprocess.call('rm -rfv sims_copies_opt_spl sims_copies_opt_disp sims_copies_opt_regdiff', shell=True)
subprocess.call('rm -rfv sims_mocks_opt_spl sims_mocks_opt_disp sims_mocks_opt_regdiff', shell=True)
print "The old files have been deleted."
return
def make_plain_copies(self, n=None, npkl=None):
'''
Makes copies of the data.
'''
Ncopies = n*npkl
print "Making", Ncopies, "copies of the original dataset:"
pycs.sim.draw.multidraw(self.lcs, onlycopy=True, n=n, npkl=npkl, simset="copies")
return
def make_mock_light_curves(self, n=None, npkl=None):
'''
Make mock lightcurves to help estimate uncertainties.
'''
modellcs, modelspline = self.lcs, self.agn
def Atweakml(xlcs):
return pycs.sim.twk.tweakml(xlcs, beta=-1.5, sigma=0.25, fmin=1/500.0, fmax=None, psplot=False)
def Btweakml(xlcs):
return pycs.sim.twk.tweakml(xlcs, beta=-1.0, sigma=0.9, fmin=1/500.0, fmax=None, psplot=False)
def Ctweakml(xlcs):
return pycs.sim.twk.tweakml(xlcs, beta=-1.0, sigma=1.5, fmin=1/500.0, fmax=None, psplot=False)
def Dtweakml(xlcs):
return pycs.sim.twk.tweakml(xlcs, beta=-0.0, sigma=4.5, fmin=1/500.0, fmax=None, psplot=False)
Nmocks = n*npkl
truetsr = 8.0
print "Making", Nmocks, "synthetic datasets, varying time delays by +/-", truetsr/2.0, "days"
pycs.sim.draw.saveresiduals(modellcs, modelspline)
pycs.sim.draw.multidraw(modellcs, modelspline, n=n, npkl=npkl, simset="mocks", truetsr=truetsr, tweakml=[Atweakml, Btweakml, Ctweakml, Dtweakml])
return
#========================================Making Multiple Model Fits
def make_spline_model_fits_of_plain_copies(self):
# Pass the optimizer function to multirun:
pycs.sim.run.multirun("copies", self.lcs, spl, optset="spl", tsrand=10.0, keepopt=True)
return
def make_spline_model_fits_of_mock_light_curves(self):
tsrand = 1.0
# Pass the optimizer function to multirun:
pycs.sim.run.multirun("mocks", self.lcs, spl, optset="spl", tsrand=tsrand, keepopt=True)
return
    def plot_intrinsic_variance_histograms(self):  # The histogram will give the intrinsic variance
dataresults = [pycs.sim.run.collect("sims_copies_opt_spl", "blue", "Free-knot spline technique")]
pycs.sim.plot.hists(dataresults, r=5.0, nbins=100, showqs=False,
filename="fig_intrinsicvariance.pdf", dataout=True)
return
#=================================================== Error Analysis
def error_summary(self):
simresults = [
pycs.sim.run.collect("sims_mocks_opt_spl", "blue", "Free-knot spline technique")]
# Nice to replace self.time_delays with a version including error bars here...
# Maybe write out the "samples" for post-processing! Could also make a corner plot...
# Compare measured time delays with truth:
pycs.sim.plot.measvstrue(simresults, errorrange=3.5, r=5.0, nbins = 1, binclip=True, binclipr=20.0,
plotpoints=False, filename="fig_measvstrue.pdf", dataout=True)
# Plot covariances between delays:
pycs.sim.plot.covplot(simresults, filename="fig_covplot.pdf")
        # Create a summary plot (of error bars and the relationship between measurements):
spl = (pycs.gen.util.readpickle("sims_copies_opt_spl_delays.pkl"),
pycs.gen.util.readpickle("sims_mocks_opt_spl_errorbars.pkl"))
# One last plot:
pycs.sim.plot.newdelayplot([spl], rplot=6.0, displaytext=True,
filename = "fig_delays.pdf", refshifts=[{"colour":"gray", "shifts":(0, -5, -20, -70)}])
return
#=====================================================Complete Error Analysis
def estimate_uncertainties(self, n=None, npkl=None):
self.delete_old_files()
self.make_plain_copies(n=n, npkl=npkl)
self.make_mock_light_curves(n=n, npkl=npkl)
# Add in an option to use regdiff and disp here
self.make_spline_model_fits_of_plain_copies()
self.make_spline_model_fits_of_mock_light_curves()
self.plot_intrinsic_variance_histograms()
self.error_summary()
return
def find_intrinsic_variance(self,n=None, npkl=None):
self.make_plain_copies(n=n, npkl=npkl)
self.make_spline_model_fits_of_plain_copies()
self.plot_intrinsic_variance_histograms()
return
def report_time_delays(self):
print "Time Delays:"
self.time_delays = pycs.gen.lc.getnicetimedelays(self.lcs, separator="\n", sorted=True)
print self.time_delays
return self.time_delays
# ======================================================================
# End of the SLTimer class.
# ======================================================================
# Optimizer functions (could go in "optimize.py" instead?)
def spl(lcs, shifttime=True, verbose=True, knotstep=20):
spline = pycs.spl.topopt.opt_rough(lcs, nit=5, knotstep=5/2.*knotstep,
shifttime=shifttime, verbose=verbose)
spline = pycs.spl.topopt.opt_rough(lcs, nit=5, knotstep=3/2.*knotstep,
shifttime=shifttime, verbose=verbose)
spline = pycs.spl.topopt.opt_fine(lcs, nit=10, knotstep=knotstep,
shifttime=shifttime, verbose=verbose)
return spline
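# Editorial note (not part of the original file): spl() optimizes coarse-to-fine,
# running two rough passes with knot spacings of 2.5x and 1.5x the requested
# knotstep before a fine pass at the requested knotstep, time-shifting the curves
# at every stage when shifttime=True.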
def spline_microlensing(lcs, ml_knotstep):
if ml_knotstep is None:
print("you didn't add any microlensing")
else:
for l in lcs:
pycs.gen.splml.addtolc(l, knotstep=ml_knotstep)
return
# To compute the chisquare
def get_chi_squared(delay, lcs_original, ml_knotstep, getlcs, knotstep=20):
import copy
lcs = copy.deepcopy(lcs_original)
for l in lcs:
l.resetshifts()
l.resetml()
spline_microlensing(lcs, ml_knotstep)
for index, l in enumerate(lcs):
if index != 0:
l.timeshift = delay[index-1]
spline = spl(lcs, verbose=False, shifttime=False, knotstep=knotstep)
if getlcs:
return [lcs, spline]
else:
return spline.lastr2nostab
def chi2_to_weight(chisquare):
weight = np.exp(-0.5*(chisquare-np.min(chisquare)))
weight /= np.sum(weight)
return weight
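# Editorial note (not part of the original file): chi2_to_weight converts
# chi-square values into normalized likelihood weights,
# w_i = exp(-0.5 * (chi2_i - min(chi2))) / sum_j exp(-0.5 * (chi2_j - min(chi2))),
# so e.g. chisquare = [10., 12.] gives weights of about [0.731, 0.269].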
|
bsd-3-clause
|
ahoyosid/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
14
|
29378
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.datasets.samples_generator import make_classification
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling this wrapper and calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
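# Illustrative sketch (editorial addition, not part of the original tests).  With
# kernel='precomputed' the estimator receives an (n_train, n_train) Gram matrix at
# fit time and an (n_test, n_train) matrix at predict time; X_tr, X_te and y_tr
# below are hypothetical arrays:
#
#   gram_train = np.dot(X_tr, X_tr.T)   # kernel between training samples
#   gram_test = np.dot(X_te, X_tr.T)    # rows: test samples, columns: train samples
#   svm.SVC(kernel='precomputed').fit(gram_train, y_tr).predict(gram_test)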
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1)
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="auto"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('auto', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that score is better when class_weight='auto' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='auto')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_inheritance():
# check that SVC classes can do inheritance
class ChildSVC(svm.SVC):
def __init__(self, foo=0):
self.foo = foo
svm.SVC.__init__(self)
clf = ChildSVC()
clf.fit(iris.data, iris.target)
clf.predict(iris.data[-1])
clf.decision_function(iris.data[-1])
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
    # clone for checking clonability with lambda functions.
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0)
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
tkaitchuck/nupic
|
external/linux64/lib/python2.6/site-packages/numpy/lib/npyio.py
|
6
|
63502
|
__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import sys
import itertools
import warnings
import weakref
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
        # cast to our GzipFile if it's already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
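# A minimal usage sketch of seek_gzip_factory, assuming a gzip file exists at
# the (hypothetical) path below; the returned handle supports forward and
# backward seek() as well as tell() on the uncompressed stream.
def _example_seek_gzip(path='data.txt.gz'):
    fh = seek_gzip_factory(path)  # GzipFile subclass with seek()/tell()
    fh.read(16)                   # advance 16 uncompressed bytes
    pos = fh.tell()               # offset into the uncompressed stream
    fh.seek(0)                    # backward seek is emulated by rewinding
    return pos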
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
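# Note: zipfile_factory simply forwards to zipfile.ZipFile, enabling ZIP64 on
# Python >= 2.5 so archives larger than 2 GiB can be written, e.g.
# (hypothetical file name):
#   zf = zipfile_factory('bundle.npz', mode='w')
#   zf.close()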
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ".npy" extension;
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
own_fid = True
else:
fid = file
try:
        # Code to distinguish NumPy binary files from pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
own_fid = False
return NpzFile(fid, own_fid=True)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
*args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
**kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with *args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with **kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
    file : str
        File name of the ``.npz`` file.
    args : Arguments, optional
        Arrays to save to the file. They will be stored with names "arr_0",
        "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file, stored under the keyword names.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
"""
_savez(file, args, kwds, True)
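# A minimal usage sketch for savez_compressed, mirroring the savez example
# above but with deflate-compressed archive members; the helper name is
# illustrative only.
def _example_savez_compressed():
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    x = np.arange(10)
    savez_compressed(outfile, x=x, y=np.sin(x))
    outfile.seek(0)            # only needed to simulate closing and reopening
    return load(outfile)['x']  # -> array([0, 1, ..., 9])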
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
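# Illustration: _getconv maps a dtype to the scalar parser used by loadtxt
# below.  Most integer dtypes are parsed via float first (int64/uint64 are
# handled separately), so text like "3.0" still converts, e.g.
#   _getconv(np.dtype(float))('2.5')  -> 2.5
#   _getconv(np.dtype('i4'))('3.0')   -> 3
#   _getconv(np.dtype(bool))('1')     -> True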
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
else:
fh = iter(open(fname, 'U'))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
        if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
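    # Worked example: for a structured dtype such as
    # np.dtype([('x', float), ('pos', float, 3)]), flatten_dtype returns four
    # float base dtypes plus the packing recipe [(1, None), (3, list)];
    # pack_items then turns a converted row [1.0, 2.0, 3.0, 4.0] into
    # (1.0, [2.0, 3.0, 4.0]) so it matches the dtype's nesting.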
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.next()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = fh.next()
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n'):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
                `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
newline : str
.. versionadded:: 1.5.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete; for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt),] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
finally:
if own_fh:
fh.close()
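# A minimal sketch of savetxt with complex data, following option c) in the
# docstring above (one specifier per column covering the real and imaginary
# parts); the output path and helper name are illustrative only.
def _example_savetxt_complex(fname='complex.out'):
    Z = np.array([[1 + 2j, 3 - 4j]])
    savetxt(fname, Z, fmt=['%.3e%+.3ej', '%.3e%+.3ej'])
    return fname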
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
        Character(s) used in replacement of white spaces in the variable names.
By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = genfromtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
---------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError("fname mustbe a string, filehandle, or generator. "\
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.next()
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = fhd.next()
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
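    # Worked example: with nbcols == 2 and names == ['a', 'b'], the
    # normalisation above gives, for instance,
    #   missing_values='N/A'      -> [['', 'N/A'], ['', 'N/A']]
    #   missing_values={0: 'nan'} -> [['', 'nan'], ['']]
    #   filling_values={'a': 0.0} -> [0.0, None]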
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
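# A minimal, hypothetical sketch of how the machinery above is exercised through
# the public genfromtxt interface. The helper name and the sample bytes are
# invented for illustration; an empty field is treated as missing and replaced
# by the given filling value.
def _demo_genfromtxt_missing():
    from io import BytesIO
    raw = BytesIO(b"1,2,3\n4,,6")
    # The empty second field on the last row is filled with -1.
    return genfromtxt(raw, delimiter=",", filling_values=-1)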
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
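# A minimal, hypothetical sketch of the convenience wrappers above. The helper
# name and sample bytes are invented for illustration; recfromcsv reads the
# header line, lower-cases the field names by default and infers the dtype.
def _demo_recfromcsv():
    from io import BytesIO
    raw = BytesIO(b"Name,Value\nalpha,1\nbeta,2")
    rec = recfromcsv(raw)
    # Fields of the record array are available as attributes, e.g. rec.value.
    return rec.value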
|
gpl-3.0
|
imaculate/scikit-learn
|
sklearn/decomposition/nmf.py
|
6
|
47038
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <[email protected]>
# Tom Dupre la Tour
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (Projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted, check_non_negative
from ..utils import deprecated
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
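# A minimal, hypothetical sketch of Hoyer's sparseness measure defined above:
# a fully dense vector maps to 0 and a vector with a single non-zero entry
# maps to 1. The helper name and vectors are invented for illustration.
def _demo_sparseness():
    import numpy as np
    dense = np.ones(4)                    # L1/L2 ratio equals sqrt(n) -> 0.0
    spike = np.array([0., 0., 0., 1.])    # single non-zero entry -> 1.0
    return _sparseness(dense), _sparseness(spike)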
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _safe_compute_error(X, W, H):
"""Frobenius norm between X and WH, safe for sparse array"""
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(norm_X + norm_WH - 2. * cross_prod)
return error
def _check_string_param(sparseness, solver):
allowed_sparseness = (None, 'data', 'components')
if sparseness not in allowed_sparseness:
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, allowed_sparseness))
allowed_solver = ('pg', 'cd')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in output to zero.
random_state : int seed, RandomState instance, or None (default)
Random number generator seed control, used in 'nndsvdar' and
'random' modes.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
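# A minimal, hypothetical sketch of the initialization routine above. The
# helper name, matrix sizes and random seed are invented for illustration;
# the returned factors have shapes (n_samples, n_components) and
# (n_components, n_features).
def _demo_initialize_nmf():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))           # non-negative data matrix
    W, H = _initialize_nmf(X, n_components=2, init='nndsvd')
    return W.shape, H.shape                # (6, 2) and (2, 5)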
def _nls_subproblem(V, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
V : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values make it possible to find a better step size, but lead to
        a longer line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
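# A minimal, hypothetical sketch of the projected-gradient NLS solver above:
# it recovers H from V = W * H_true starting from a random non-negative guess.
# The helper name, sizes, tolerance and iteration budget are invented for
# illustration.
def _demo_nls_subproblem():
    import numpy as np
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(10, 3))
    H_true = np.abs(rng.randn(3, 4))
    V = np.dot(W, H_true)
    H0 = np.abs(rng.randn(3, 4))
    H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
    return np.linalg.norm(V - np.dot(W, H))   # small residual on convergence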
def _update_projected_gradient_w(X, W, H, tolW, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = H.shape[0]
if sparseness is None:
Wt, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(beta) * np.ones((1,
n_components_))]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
Wt, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(eta) * np.eye(n_components_)]),
W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return Wt.T, gradW.T, iterW
def _update_projected_gradient_h(X, W, H, tolH, nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Helper function for _fit_projected_gradient"""
n_samples, n_features = X.shape
n_components_ = W.shape[1]
if sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((n_components_, n_features))]),
safe_vstack([W,
np.sqrt(eta) * np.eye(n_components_)]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
elif sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W, np.sqrt(beta) * np.ones((1, n_components_))]),
H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio)
return H, gradH, iterH
def _fit_projected_gradient(X, W, H, tol, max_iter,
nls_max_iter, alpha, l1_ratio,
sparseness, beta, eta):
"""Compute Non-negative Matrix Factorization (NMF) with Projected Gradient
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with Sparseness Constraints.
Journal of Machine Learning Research 2004.
"""
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition
# as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
W, gradW, iterW = _update_projected_gradient_w(X, W, H, tolW,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _update_projected_gradient_h(X, W, H, tolH,
nls_max_iter,
alpha, l1_ratio,
sparseness, beta, eta)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
W, _, _ = _update_projected_gradient_w(X, W, H, tol, nls_max_iter,
alpha, l1_ratio, sparseness,
beta, eta)
return W, H, n_iter
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = fast_dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, alpha=0.001,
l1_ratio=0., regularization=None, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
# L1 and L2 regularization
l1_H, l2_H, l1_W, l2_W = 0, 0, 0, 0
if regularization in ('both', 'components'):
alpha = float(alpha)
l1_H = l1_ratio * alpha
l2_H = (1. - l1_ratio) * alpha
if regularization in ('both', 'transformation'):
alpha = float(alpha)
l1_W = l1_ratio * alpha
l2_W = (1. - l1_ratio) * alpha
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_W, l2_W,
shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_H, l2_H,
shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
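# A minimal, hypothetical sketch of the coordinate-descent solver above,
# assuming the compiled _update_cdnmf_fast extension is importable. Initial
# factors come from _initialize_nmf; sizes, seed and tolerance are invented
# for illustration.
def _demo_fit_coordinate_descent():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(8, 6))
    W0, H0 = _initialize_nmf(X, n_components=3, init='random', random_state=0)
    W, H, n_iter = _fit_coordinate_descent(X, W0, H0, tol=1e-4, max_iter=200,
                                           random_state=0)
    return _safe_compute_error(X, W, H), n_iter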
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False, nls_max_iter=2000,
sparseness=None, beta=1, eta=0.1):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
update_H : boolean, default: True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a (deprecated) Projected Gradient solver.
'cd' is a Coordinate Descent solver.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
_check_string_param(sparseness, solver)
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, numbers.Integral) or n_components <= 0:
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(max_iter, numbers.Integral) or max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive integer;"
" got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
if solver == 'pg':
warnings.warn("'pg' solver will be removed in release 0.19."
" Use 'cd' solver instead.", DeprecationWarning)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
max_iter,
nls_max_iter,
alpha, l1_ratio,
sparseness,
beta, eta)
else: # transform
W, H, n_iter = _update_projected_gradient_w(X, W, H,
tol, nls_max_iter,
alpha, l1_ratio,
sparseness, beta,
eta)
elif solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
max_iter,
alpha, l1_ratio,
regularization,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
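# A minimal, hypothetical sketch of the function above with the default
# coordinate-descent solver. The helper name and toy matrix (the same one used
# in the NMF docstring below) are for illustration only.
def _demo_non_negative_factorization():
    import numpy as np
    X = np.array([[1., 1.], [2., 1.], [3., 1.2], [4., 1.], [5., 0.8], [6., 1.]])
    W, H, n_iter = non_negative_factorization(X, n_components=2, init='random',
                                              random_state=0)
    return np.dot(W, H)    # approximates X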
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, init=None, solver='cd',
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0, shuffle=False,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
if sparseness is not None:
warnings.warn("Controlling regularization through the sparseness,"
" beta and eta arguments is only available"
" for 'pg' solver, which will be removed"
" in release 0.19. Use another solver with L1 or L2"
" regularization instead.", DeprecationWarning)
self.nls_max_iter = nls_max_iter
self.sparseness = sparseness
self.beta = beta
self.eta = eta
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components,
init=self.init, update_H=True, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
if self.solver == 'pg':
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
self.reconstruction_err_ = _safe_compute_error(X, W, H)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
Attributes
----------
components_ : array-like, shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Attributes
----------
n_iter_ : int
Actual number of iterations for the transform.
Returns
-------
W: array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle,
nls_max_iter=self.nls_max_iter, sparseness=self.sparseness,
beta=self.beta, eta=self.eta)
self.n_iter_ = n_iter_
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W: {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X: {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
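# A minimal, hypothetical round trip through the estimator above, reusing the
# toy matrix from the class docstring: fit_transform learns W and components_,
# and inverse_transform maps W back to the data space. The helper name is
# invented for illustration.
def _demo_nmf_estimator():
    import numpy as np
    X = np.array([[1., 1.], [2., 1.], [3., 1.2], [4., 1.], [5., 0.8], [6., 1.]])
    model = NMF(n_components=2, init='random', random_state=0)
    W = model.fit_transform(X)
    X_approx = model.inverse_transform(W)
    return model.reconstruction_err_, X_approx.shape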
@deprecated("It will be removed in release 0.19. Use NMF instead."
"'pg' solver is still available until release 0.19.")
class ProjectedGradientNMF(NMF):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'pg' | 'cd'
Numerical solver to use:
'pg' is a Projected Gradient solver (deprecated).
'cd' is a Coordinate Descent solver (recommended).
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : integer, default: 200
Number of iterations to compute.
random_state : integer seed, RandomState instance, or None (default)
Random number generator seed control.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
nls_max_iter : integer, default: 2000
Number of iterations in NLS subproblem.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error. Used only in the deprecated 'pg' solver.
.. versionchanged:: 0.17
Deprecated Projected Gradient solver. Use Coordinate Descent solver
instead.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
NMF(alpha=0.0, beta=1, eta=0.1, init='random', l1_ratio=0.0, max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, shuffle=False,
solver='cd', sparseness=None, tol=0.0001, verbose=0)
>>> model.components_
array([[ 2.09783018, 0.30560234],
[ 2.13443044, 2.13171694]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00115993...
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., verbose=0,
nls_max_iter=2000, sparseness=None, beta=1, eta=0.1):
super(ProjectedGradientNMF, self).__init__(
n_components=n_components, init=init, solver='pg', tol=tol,
max_iter=max_iter, random_state=random_state, alpha=alpha,
l1_ratio=l1_ratio, verbose=verbose, nls_max_iter=nls_max_iter,
sparseness=sparseness, beta=beta, eta=eta)
|
bsd-3-clause
|
andrewwang79/walle
|
ximu/bus_data_crawl.py
|
1
|
5642
|
# -*- encoding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
from pandas import DataFrame
import pandas as pd
import json
import time
class BusData:
def __init__(self):
"""
        The ak, ip and port values below are masked placeholders.
"""
self.city = "上海市"
self.ak = "your ak"
self.data_path = r"C:\Users\prince\Desktop\POI_bus.xlsx"
ip = "123.45.67.890"
port = 00000
client = MongoClient(ip, port)
db = client.eastnb
self.collection = db.bus_line
@staticmethod
def _now_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
def _gps_search_name(self, name):
"""百度地图API-POI查询"""
url = r"http://api.map.baidu.com/place/v2/search"
r = requests.get(url=url, params={
"query": "%s" % name,
"page_size": "1",
"page_num": "0",
"scope": "1",
"region": self.city,
"output": "json",
"ak": self.ak
})
if r.json()["status"] != 0:
            raise ValueError("Baidu Map API returned non-zero status: %s"
                             % r.json()["status"])
try:
location = r.json()["results"][0]["location"]
except:
return {"lat": None, "lng": None}
else:
return {"lat": round(float(location["lat"]), 6), "lng": round(float(location["lng"]), 6)}
def data_save(self):
request = requests.get(url=r"https://zh.wikipedia.org/wiki/上海公交线路表")
bsobj = BeautifulSoup(request.text, "html.parser")
all_line = bsobj.select("tr")
all_line_info_list = []
for line in all_line:
line_info_list = line.select("td")
if len(line_info_list) == 7:
Item = {
"name": line_info_list[0].get_text(strip=True),
"operator": line_info_list[1].get_text(strip=True) if line_info_list[1].get_text(strip=True) else None,
"start-stop": line_info_list[2].get_text(strip=True) if line_info_list[3].get_text(strip=True) else None,
"price": line_info_list[3].get_text(strip=True) if line_info_list[3].get_text(strip=True) else None,
"mileage": line_info_list[4].get_text(strip=True) if line_info_list[4].get_text(strip=True) else None,
"note": line_info_list[5].get_text(strip=True) if line_info_list[5].get_text(strip=True) else None,
"preview": "https:" + line_info_list[6].select("img")[0].get("src") if line_info_list[6].select("img") else None
}
all_line_info_list.append(Item)
DF = DataFrame(all_line_info_list)
DF.to_excel(self.data_path, index=False, encoding="utf-8")
def data_processing(self):
self.data_save()
DF = pd.read_excel(self.data_path)
for i in range(len(DF['name'])):
Item = dict(DF.loc[i])
for key in Item:
if str(Item[key]) == "nan":
Item[key] = None
bus_info = requests.get(url=r"http://xxbs.sh.gov.cn:8080/weixinpage/HandlerOne.ashx",
params={"name": Item["name"]},)
try:
bus_dict = dict(bus_info.json())
except json.decoder.JSONDecodeError:
pass
else:
Item["line_id"] = bus_dict['line_id'].strip()
stop_info = requests.get(url=r"http://xxbs.sh.gov.cn:8080/weixinpage/HandlerTwo.ashx",
params={"name": Item["name"], "lineid": Item["line_id"]})
stop_list = [stop_info.json()["lineResults0"]["stops"]]
Item["direction"] = [{}]
if stop_info.json().get("lineResults1"):
stop_list.append(stop_info.json().get("lineResults1")['stops'])
Item["direction"] = [{}] * 2
for n in range(len(stop_list)):
if n == 0:
Item["direction"][n]["start_stop"] = bus_dict["start_stop"].strip()
Item["direction"][n]["end_stop"] = bus_dict["end_stop"].strip()
Item["direction"][n]["early_time"] = bus_dict["start_earlytime"].strip()
Item["direction"][n]["late_time"] = bus_dict["start_latetime"].strip()
else:
Item["direction"][n]["start_stop"] = bus_dict["end_stop"].strip()
Item["direction"][n]["end_stop"] = bus_dict["start_stop"].strip()
Item["direction"][n]["early_time"] = bus_dict["end_earlytime"].strip()
Item["direction"][n]["late_time"] = bus_dict["end_latetime"].strip()
Item["direction"][n]["stop_list"] = []
for stop in stop_list[n]:
address = stop["zdmc"] + Item["name"] + "公交站"
loc = self._gps_search_name(address)
Item["direction"][n]["stop_list"].append({
"stop_id": stop["id"],
"stop_name": stop["zdmc"],
"stop_lat": loc["lat"],
"stop_lng": loc["lng"]
})
Item["creat_time"] = self._now_time()
Item["source"] = "xxbs.sh.gov.cn:"
Item["href"] = bus_info.url
# print(Item)
self.collection.insert_one(Item)
print(i, 'ok')
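# A minimal, hypothetical sketch of how the documents stored by data_processing
# can be read back. The helper name and the queried line name are invented for
# illustration; each stored document carries the scraped line fields plus a
# "direction" list whose entries hold start_stop, end_stop, early_time,
# late_time and a stop_list of {stop_id, stop_name, stop_lat, stop_lng} dicts.
def _demo_query_line(collection, line_name):
    # Fetch one stored bus line document by its "name" field.
    return collection.find_one({"name": line_name})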
if __name__ == "__main__":
bus_data = BusData()
bus_data.data_processing()
|
apache-2.0
|
r-kamath/zeppelin
|
python/src/main/resources/grpc/python/ipython_client.py
|
27
|
1457
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import ipython_pb2
import ipython_pb2_grpc
def run():
channel = grpc.insecure_channel('localhost:50053')
stub = ipython_pb2_grpc.IPythonStub(channel)
response = stub.execute(ipython_pb2.ExecuteRequest(code="import time\nfor i in range(1,4):\n\ttime.sleep(1)\n\tprint(i)\n" +
"%matplotlib inline\nimport matplotlib.pyplot as plt\ndata=[1,1,2,3,4]\nplt.figure()\nplt.plot(data)"))
for r in response:
print("output:" + r.output)
response = stub.execute(ipython_pb2.ExecuteRequest(code="range?"))
for r in response:
print(r)
if __name__ == '__main__':
run()
|
apache-2.0
|
Srisai85/scikit-learn
|
sklearn/calibration.py
|
137
|
18876
|
"""Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been
    fitted already and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
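# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# A minimal example of the workflow the class docstring describes: wrap a base
# estimator, fit with internal cross-validation, then read calibrated
# probabilities. The synthetic data and helper name are assumptions made for
# illustration only.
def _example_calibrated_classifier_cv():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=300, random_state=0)
    clf = CalibratedClassifierCV(LinearSVC(random_state=0), method='sigmoid', cv=3)
    clf.fit(X, y)
    return clf.predict_proba(X)  # shape (300, 2); each row sums to 1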
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
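# --- Hedged numeric sketch (editorial addition, not part of the original module) ---
# Illustrates the mapping the fitted pair (a, b) defines, following Platt:
# P(y=1 | f) = 1 / (1 + exp(a * f + b)). The synthetic scores, labels and
# helper name below are assumptions made for illustration only.
def _example_sigmoid_calibration():
    rng = np.random.RandomState(0)
    df = rng.randn(500)                              # fake decision values
    y = (df + 0.5 * rng.randn(500) > 0).astype(int)  # noisy binary labels
    a, b = _sigmoid_calibration(df, y)
    proba = 1. / (1. + np.exp(a * df + b))           # calibrated P(y=1 | f)
    return a, b, proba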
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the [0, 1] interval, i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
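# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Shows how calibration_curve pairs the fraction of positives with the mean
# predicted probability per bin, which is what a reliability diagram plots.
# The synthetic data, classifier choice and helper name are assumptions made
# for illustration only.
def _example_calibration_curve():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X, y = make_classification(n_samples=1000, random_state=0)
    proba = LogisticRegression().fit(X, y).predict_proba(X)[:, 1]
    # A well calibrated model keeps prob_true close to prob_pred in every bin.
    return calibration_curve(y, proba, n_bins=10)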
|
bsd-3-clause
|
pdamodaran/yellowbrick
|
paper/figures/figures.py
|
1
|
5904
|
#!/usr/bin/env python3
# Script to create visualizations for the JOSS paper
import os
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from yellowbrick.features import Rank2D, RadViz
from yellowbrick.model_selection import LearningCurve
from yellowbrick.cluster import KElbowVisualizer, SilhouetteVisualizer
from yellowbrick.classifier import ClassificationReport, DiscriminationThreshold
from yellowbrick.regressor import ResidualsPlot, PredictionError, AlphaSelection
from collections import namedtuple
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.cluster import MiniBatchKMeans, Birch
from sklearn.model_selection import train_test_split as tts
from sklearn.linear_model import LassoCV, RidgeCV, LogisticRegression
# Store figures alongside the script that generates them
FIGURES = os.path.dirname(__file__)
# Path to datasets downloaded from S3
DATA = os.path.join(
os.path.dirname(__file__), "..", "..", "yellowbrick", "datasets", "fixtures"
)
# Quick reference dataset objects
Dataset = namedtuple('Dataset', 'X,y')
Split = namedtuple('Split', 'train,test')
def _make_dataset(X, y, split=False):
if split:
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2)
return Dataset(Split(X_train, X_test), Split(y_train, y_test))
return Dataset(X, y)
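# --- Hedged usage sketch (editorial addition, not part of the original script) ---
# Illustrates the Dataset/Split access pattern used throughout this script:
# with split=True the loaders expose data.X.train / data.X.test (and likewise
# for y); with split=False they return plain X and y. The toy arrays and helper
# name below are assumptions made for illustration only.
def _example_dataset_access():
    X = np.arange(20).reshape(10, 2)
    y = np.arange(10)
    data = _make_dataset(X, y, split=True)
    return data.X.train.shape, data.X.test.shape, data.y.train.shape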
def load_occupancy(split=False):
"""
Create a dataset for the specified yb dataset
"""
path = os.path.join(DATA, "occupancy", "occupancy.csv")
data = pd.read_csv(path)
X = data[["temperature", "relative humidity", "light", "C02", "humidity"]]
y = data["occupancy"]
return _make_dataset(X, y, split)
def load_concrete(split=False):
path = os.path.join(DATA, "concrete", "concrete.csv")
data = pd.read_csv(path)
X = data[['cement', 'slag', 'ash', 'water', 'splast', 'coarse', 'fine', 'age']]
y = data['strength']
return _make_dataset(X, y, split)
def load_spam(split=False):
path = os.path.join(DATA, "spam", "spam.csv")
data = pd.read_csv(path)
target = "is_spam"
features = [col for col in data.columns if col != target]
X = data[features]
y = data[target]
return _make_dataset(X, y, split)
def feature_analysis(fname="feature_analysis.png"):
"""
Create figures for feature analysis
"""
# Create side-by-side axes grid
_, axes = plt.subplots(ncols=2, figsize=(18,6))
# Draw RadViz on the left
data = load_occupancy(split=False)
oz = RadViz(ax=axes[0], classes=["unoccupied", "occupied"])
oz.fit(data.X, data.y)
oz.finalize()
# Draw Rank2D on the right
data = load_concrete(split=False)
oz = Rank2D(ax=axes[1])
oz.fit_transform(data.X, data.y)
oz.finalize()
# Save figure
path = os.path.join(FIGURES, fname)
plt.tight_layout()
plt.savefig(path)
def regression(fname="regression.png"):
"""
Create figures for regression models
"""
_, axes = plt.subplots(ncols=2, figsize=(18, 6))
alphas = np.logspace(-10, 1, 300)
data = load_concrete(split=True)
    # Plot prediction error on the left
oz = PredictionError(LassoCV(alphas=alphas), ax=axes[0])
oz.fit(data.X.train, data.y.train)
oz.score(data.X.test, data.y.test)
oz.finalize()
# Plot residuals on the right
oz = ResidualsPlot(RidgeCV(alphas=alphas), ax=axes[1])
oz.fit(data.X.train, data.y.train)
oz.score(data.X.test, data.y.test)
oz.finalize()
# Save figure
path = os.path.join(FIGURES, fname)
plt.tight_layout()
plt.savefig(path)
def classification(fname="classification.png"):
# Create side-by-side axes grid
_, axes = plt.subplots(ncols=2, figsize=(18,6))
    # Add ClassificationReport to the left
data = load_spam(split=True)
oz = ClassificationReport(MultinomialNB(), classes=["ham", "spam"], ax=axes[0])
oz.fit(data.X.train, data.y.train)
oz.score(data.X.test, data.y.test)
oz.finalize()
# Add DiscriminationThreshold to the right
data = load_spam(split=False)
oz = DiscriminationThreshold(LogisticRegression(), ax=axes[1])
oz.fit(data.X, data.y)
oz.finalize()
# Save figure
path = os.path.join(FIGURES, fname)
plt.tight_layout()
plt.savefig(path)
def clustering(fname="clustering.png"):
# Create side-by-side axes grid
_, axes = plt.subplots(ncols=2, figsize=(18,6))
X, y = make_blobs(centers=7)
# Add K-Elbow to the left
oz = KElbowVisualizer(MiniBatchKMeans(), k=(3,12), ax=axes[0])
oz.fit(X, y)
oz.finalize()
# Add SilhouetteVisualizer to the right
oz = SilhouetteVisualizer(Birch(n_clusters=5), ax=axes[1])
oz.fit(X, y)
oz.finalize()
# Save figure
path = os.path.join(FIGURES, fname)
plt.tight_layout()
plt.savefig(path)
def hyperparameter_tuning(fname="hyperparameter_tuning.png"):
# Create side-by-side axes grid
_, axes = plt.subplots(ncols=2, figsize=(18,6))
# Load the concrete dataset
data = load_concrete(split=False)
# Create a list of alphas to cross-validate against
alphas = np.logspace(-10, 1, 400)
# Add AlphaSelection to the left
oz = AlphaSelection(LassoCV(alphas=alphas), ax=axes[0])
oz.fit(data.X, data.y)
oz.finalize()
# Add LearningCurve to the right
oz = LearningCurve(RandomForestRegressor(), scoring='r2', ax=axes[1])
oz.fit(data.X, data.y)
oz.finalize()
# Save figure
path = os.path.join(FIGURES, fname)
plt.tight_layout()
plt.savefig(path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="generate visualizations for JOSS paper"
)
args = parser.parse_args()
feature_analysis()
regression()
classification()
clustering()
hyperparameter_tuning()
|
apache-2.0
|
batxes/4Cin
|
src/paint_model.py
|
2
|
15498
|
#!/usr/bin/python
import sys
import inspect
import re
import pysam
import os
import numpy as np
import heapq
import matplotlib
import matplotlib.cm as cm
import pylab
import argparse
# realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"src")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
from data_manager import fileCheck, calculate_fragment_number
working_dir = (os.path.realpath(__file__)).split("/")[:-1]
working_dir = "/".join(working_dir)+"/"
parser = argparse.ArgumentParser(
description = "Script that paints a 3D model generate with 4Cin, providing a bam or bed file.",
epilog = "if vHi-C (virtual Hi-C) is provided, a clustered RMSD is generated with the beads of the painted model depicted in the diagonal")
parser.add_argument("model", action="store", help="3D model that will be painted.")
parser.add_argument("--vHi-C", action="store", dest="distance_matrix", default = "None", help="vHi-C (virtual Hi-C) file generated with 4Cin.")
parser.add_argument("--name", action="store",dest="prefix",default="4Cin", help="Name for the generated files.")
parser.add_argument("--fragments_in_each_bead",type=int, action="store",dest="fragments_in_each_bead",default=0, help="Number of fragments that will be represented with each bead")
parser.add_argument("data_dir", action="store",help='location of the 4C data. primers.txt file needs to be also in the directory')
parser.add_argument("--working_dir", action="store",default=working_dir, dest="working_dir",help='location where the models will be generated')
parser.add_argument("painting_path", action="store",help="Bed/Bam file.")
parser.add_argument("colormap", action="store",help="Colormap (matplotlib) to paint the model. Check: http://matplotlib.org/examples/color/colormaps_reference.html.")
args = parser.parse_args()
model = args.model
data_dir = args.data_dir
painting_path = args.painting_path
colormap = args.colormap
distance_matrix = args.distance_matrix
prefix = args.prefix
fragments_in_each_bead = args.fragments_in_each_bead
data_dir = args.data_dir
working_dir = args.working_dir
if data_dir[-1] != "/":
data_dir = data_dir + "/"
#get the name and position from primers.txt
#primers.txt: name chrN:position
primers = {}
viewpoint_positions = []
primers_file = fileCheck(data_dir+"primers.txt")
for line in primers_file:
m = re.search('([^\s\t]+).*chr\w+:(\d+)', line)
try:
primers[m.group(1)] = int(m.group(2))
except:
break
print "\nPrimers.txt. These are the viewpoints that will be used in the modeling:"
for k,v in primers.iteritems():
print "Viewpoint:{}\tposition:{}".format(k,v)
viewpoint_positions.append(v)
print
file_names = primers.keys()
files = [data_dir+f for f in file_names]
# read one of the files and get number of fragments and default fragments_in_each_bead
# a_4c_file: chrN start end value
start_frag = 0
end_frag = 0
number_of_fragments = 0
a_4c_file = fileCheck(data_dir+primers.keys()[0])
for line in a_4c_file:
values = line.split()
if len(values) != 4:
continue
if start_frag == 0:
start_frag = int(values[1])
end_frag = int(values[2])
number_of_fragments += 1
locus_size = end_frag - start_frag
viewpoint_fragments = calculate_fragment_number(viewpoint_positions,files[0])
#default, we want 100 beads in each model
if fragments_in_each_bead == 0:
fragments_in_each_bead = int(number_of_fragments / 100)
viewpoint_fragments = [int(i/fragments_in_each_bead) for i in viewpoint_fragments]
are_genes = viewpoint_fragments
# keep the raw fragment count (needed by the DNA-met bed loop below), then get number of beads
number_of_fragments_ALL = number_of_fragments
number_of_fragments = int(number_of_fragments/fragments_in_each_bead)
storage_dir = model.split("/")[:-1]
storage_dir = "/".join(storage_dir)
bam_or_bed = painting_path[-3:]
if bam_or_bed != "bam" and bam_or_bed != "bed":
print "Data file used to paint needs to be a bam or bed file."
sys.exit()
#read the files and create a bed_file
starts = []
ends = []
if bam_or_bed == "bam":
print "Reading bam file..."
bamhandle = pysam.AlignmentFile(painting_path,"rb")
with open ("bedbam_file","w") as stdout:
with open (files[0],"r") as stdin:
for line in stdin:
values = line.split("\t")
starts.append(int(values[1]))
ends.append(int(values[2]))
#change first char of CHR
chr_ = list(values[0])
chr_[0] = 'c'
values[0] = "".join(chr_)
read_count = bamhandle.count(values[0],int(values[1]),int(values[2])) #chrm, start, end
stdout.write("{}\t{}\t{}\t{}\n".format(values[0],values[1],values[2],read_count))
elif bam_or_bed == "bed":
with open (files[0],"r") as stdin:
for line in stdin:
values = line.split("\t")
starts.append(int(values[1]))
ends.append(int(values[2]))
# create bed file for DNAmet
print "Reading DNA met file..."
counter = 0
total_reads = 0
with open (painting_path,"r") as stdin:
print "Writing bed file with DNA met data..."
with open ("bedbam_file","w") as stdout:
for line in stdin:
if counter == number_of_fragments_ALL:
break
else:
values = line.split("\t")
if int(values[0]) == 13:
if starts[counter] <= int(values[1]) <= ends[counter]:
total_reads += float(values[4])/float(values[5])
else:
if total_reads == 0 and counter > 0 and starts[counter] <= int(values[1]): #gap in data
stdout.write("{}\t{}\t{}\t{}\n".format("chr13",starts[counter],ends[counter],0))
counter += 1
if total_reads != 0:
stdout.write("{}\t{}\t{}\t{}\n".format("chr13",starts[counter],ends[counter],total_reads))
counter += 1
total_reads = 0
# create bed file
print "Painting genome..."
#file format: chr from to value
bead_values = []
with open ("bedbam_file","r") as stdin:
counter = 0
added_reads = 0
added_region = 0
for line in stdin:
values = line.split("\t")
added_region += float(values[2])-float(values[1])
added_reads += float(values[3])
counter += 1
if counter == fragments_in_each_bead:
normalized_read = added_reads/added_region
bead_values.append(normalized_read)
#print normalized_read
counter = 0
added_region = 0
added_reads = 0
#We dont use the last bead (Modeling does int())
#if counter != fragments_in_each_bead and counter != 0: #we add the min value to the last bead if it did not reach to Nfragments
# normalized_read = added_reads/added_region
# bead_values.append(normalized_read)
import matplotlib as mpl
#cmap = cm.Blues
cmap = cm.get_cmap(colormap)
#take out outliers (Q1-1.5xIQR - Q3+1.5*IQR take only)
mid = np.percentile(bead_values,50)
Q1 = np.percentile(bead_values,25)
Q3 = np.percentile(bead_values,75)
IQR = Q3 - Q1
inlier1 = Q1 - 1.5*IQR
inlier2 = Q3 + 1.5*IQR
vmax = 0
for max_value in bead_values:
if max_value >= inlier1 and max_value <= inlier2:
if max_value > vmax:
vmax = max_value
vmin = 1000000
for min_value in bead_values:
if min_value >= inlier1 and min_value <= inlier2:
if min_value <= vmin:
vmin = min_value
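# --- Hedged equivalent sketch (editorial addition, not part of the original script) ---
# The two loops above clip the colour range to Tukey inliers
# (Q1 - 1.5*IQR <= value <= Q3 + 1.5*IQR) before the colormap norm is built.
# The same vmin/vmax could be computed with a numpy mask; the helper below is
# shown only as an illustration of that rule.
def _inlier_range(values):
    values = np.asarray(values, dtype=float)
    q1, q3 = np.percentile(values, 25), np.percentile(values, 75)
    iqr = q3 - q1
    mask = (values >= q1 - 1.5 * iqr) & (values <= q3 + 1.5 * iqr)
    return values[mask].min(), values[mask].max()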
#for ctcf h3k4me3
#print "min value = ",vmax
#print "max value = ",inlier2
#norm = mpl.colors.Normalize(vmin=vmax, vmax=inlier2)
#for dnamet, h3k27ac,atac
#print "min value = ",vmin
#print "max value = ",vmax
#ONLY FOR INVERSION
bead_values[35:64] = bead_values[64:35:-1]
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
m = cm.ScalarMappable(norm=norm, cmap=cmap)
with open("{}/coloring.cmd".format(storage_dir),"w") as colored_model:
colored_model.write("open {}\n".format(model))
for number in range(number_of_fragments):
current_color = matplotlib.colors.rgb2hex(m.to_rgba(bead_values[number]))
colored_model.write("color {} #{}\n".format(current_color,number))
#print "bead: {} color:{} value:{}".format(number,matplotlib.colors.rgb2hex(m.to_rgba(bead_values[number])),bead_values[number])
colored_model.write("shape tube #{}-{} radius 200 bandlength 10000".format(0,number_of_fragments))
print "coloring.cmd generated in {}/".format(storage_dir)
print "Now, open coloring.cmd wich Chimera."
print "\nGenerating some plots..."
#plot statistic figures
fig = pylab.figure(figsize=(8,8))
fig.suptitle("Epigenetic Marks")
pylab.xlabel("Bead Number")
pylab.ylabel("Score")
h = pylab.bar(range(len(bead_values)),bead_values,color=cmap(norm(bead_values)),width=1,linewidth=0)
#for i in range(len(bead_values)-1):
#pylab.vlines(i,0,bead_values[i],color=cmap(norm(bead_values[i])),linewidth=6)
#pylab.fill_between([range(len(bead_values))[i],range(len(bead_values))[i+1]],[bead_values[i],bead_values[i+1]],color=cmap(norm(bead_values[i])))
axes = pylab.gca()
axes.set_xlim([0-0.5,len(bead_values)-0.5])
axes.set_axis_bgcolor('white')
try:
fig.savefig('{}/genome_painting_stats_plot_{}.png'.format(storage_dir,prefix))
print "Plot painted in {}/".format(storage_dir,prefix)
except:
pass
fig = pylab.figure(figsize=(8,8))
pylab.hist(bead_values,bins=100)
fig.suptitle("Epigenetic Marks. Histogram.")
pylab.ylabel("Frequency")
pylab.xlabel("Score")
try:
fig.savefig('{}/genome_painting_stats_hist_{}.png'.format(storage_dir,prefix))
print "Histogram painted in {}/".format(storage_dir,prefix)
except:
pass
fig = pylab.figure(figsize=(8,8))
pylab.boxplot(bead_values)
fig.suptitle("Epigenetic Marks. Boxplot.")
pylab.ylabel("Score")
try:
fig.savefig('{}/genome_painting_stats_box_{}.png'.format(storage_dir,prefix))
print "Box plot painted in {}/".format(storage_dir,prefix)
except:
pass
#distance_matrix needed for this plot
if distance_matrix != "None":
bead_count = len(bead_values)
fig = pylab.figure(figsize=(8,8))
distance_value = []
epigenetic_value = []
color_value = []
with open(distance_matrix, 'r') as mtx:
for line in mtx:
values = re.split(",",line)
if int(values[0]) != int(values[1]):
#if int(values[0]) == 0:
distance_value.append(float(values[2]))
epigenetic_value.append(bead_values[int(values[1])])
color_value.append(cmap(norm(bead_values))[int(values[0])])
print len(distance_value)
for i in range(0,(bead_count-1)*(bead_count-1),bead_count):
from_ = i
to_ = i+bead_count-1
#print distance_value[from_:to_]
pylab.plot(np.unique(distance_value[from_:to_]), np.poly1d(np.polyfit(distance_value[from_:to_], epigenetic_value[from_:to_], 1))(np.unique(distance_value[from_:to_])))
pylab.scatter(distance_value,epigenetic_value,color=color_value)
#fitting
#pylab.plot(np.unique(distance_value), np.poly1d(np.polyfit(distance_value, epigenetic_value, 1))(np.unique(distance_value)))
pylab.xlim(0)
try:
fig.savefig('genome_painting_proximity_plot.png')
except:
pass
#print distance_value
#print epigenetic_value
#print color_value
#test.
# check out size of beads in the scatterplot
# check out the matrix_path
#check out the vmin and vmax
dendro_test = False
if dendro_test:
####################
import sys
import operator
import re
import subprocess
from os import listdir, remove
from os.path import isfile, join
from itertools import combinations
import scipy.cluster.hierarchy as sch
import pylab
from pylab import plot,show
from scipy.cluster.vq import kmeans,vq
matrix_path = "/home/ibai/4c2vhic/data/Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/distances_of_current_model_Six_zebra_models"
#matrix_path = "/home/ibai/4c2vhic/distances_of_current_model_zebra_200.txt"
article = True
enriched_dendro = True
cutoff = 0.25
cutoff = 0.0 #ALL beads
value_list = []
count = 0
bead_values = bead_values[0:-1]
bead_colors = []
if article:
for bead in bead_values:
bead_colors.append(bead)
if enriched_dendro:
if bead >= cutoff:
value_list.append(count)
else:
if bead < cutoff:
value_list.append(count)
count += 1
matrix = np.zeros((len(value_list),len(value_list)))
count1 = -1
for bead1 in value_list:
count1 += 1
count2 = -1
for bead2 in value_list:
count2 += 1
with open("{}".format(matrix_path), "r") as mtx:
for line in mtx:
values = re.split(",", line)
if int(values[0]) == bead1 and int(values[1]) == bead2:
matrix[count1][count2] = float(values[2])
matrix[count2][count1] = float(values[2])
break
D = matrix
# Compute and plot first dendrogram.
fig = pylab.figure(figsize=(8,8))
ax1 = fig.add_axes([0.09,0.1,0.2,0.6])
Y = sch.linkage(D, method='average')
Z1 = sch.dendrogram(Y, orientation='right')
ax1.set_xticks([])
ax1.set_yticks([])
# # Compute and plot second dendrogram.
ax2 = fig.add_axes([0.3,0.71,0.6,0.2])
Y = sch.linkage(D, method='average')
Z2 = sch.dendrogram(Y,orientation='top')
ax2.set_xticks([])
ax2.set_yticks([])
# Plot distance matrix.
axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = D[idx1,:]
D = D[:,idx2]
im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=pylab.cm.YlGnBu)
axmatrix.set_xticks(range(len(value_list)))
ordered_values = [value_list[i] for i in Z1['leaves']]
axmatrix.set_xticklabels(ordered_values)
axmatrix.xaxis.set_label_position('bottom')
axmatrix.xaxis.tick_bottom()
pylab.xticks(rotation=-90,fontsize=8)
axmatrix.set_yticks([])
# Plot colorbar.
axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
pylab.colorbar(im, cax=axcolor)
#scatter plot
color = [bead_colors[i] for i in ordered_values]
aux = []
cut_max = vmax
cut_min = vmin
cut_max = mid
cut_min = mid
for t in color:
if t > cut_max:
aux.append(max(bead_values))
elif t <= cut_min:
aux.append(min(bead_values))
else:
aux.append(t)
color = aux
axmatrix.scatter(range(len(value_list)), range(len(value_list)), s=80, c=color,vmin=min(bead_values),vmax=max(bead_values),cmap=cm.Reds)
#fig.show()
try:
fig.savefig('atac_heatmap.png')
except:
pass
|
gpl-3.0
|
amolkahat/pandas
|
pandas/tests/indexes/period/test_period.py
|
2
|
20374
|
import pytest
import numpy as np
import pandas as pd
import pandas.util._test_decorators as td
from pandas.util import testing as tm
from pandas import (PeriodIndex, period_range, DatetimeIndex, NaT,
Index, Period, Series, DataFrame, date_range,
offsets)
from ..datetimelike import DatetimeLike
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
def setup_method(self, method):
self.indices = dict(index=tm.makePeriodIndex(10),
index_dec=period_range('20130101', periods=10,
freq='D')[::-1])
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_pickle_compat_construction(self):
pass
@pytest.mark.parametrize('freq', ['D', 'M', 'A'])
def test_pickle_round_trip(self, freq):
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
def test_where(self):
# This is handled in test_indexing
pass
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
tm.assert_index_equal(res, exp)
assert res.freqstr == 'D'
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
tm.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna(
pd.Period('2011-01-01', freq='D')), exp)
def test_no_millisecond_field(self):
with pytest.raises(AttributeError):
DatetimeIndex.millisecond
with pytest.raises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_freq(self):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq='D')
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq='M')
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_dtype_str(self):
pi = pd.PeriodIndex([], freq='M')
assert pi.dtype_str == 'period[M]'
assert pi.dtype_str == str(pi.dtype)
pi = pd.PeriodIndex([], freq='3M')
assert pi.dtype_str == 'period[3M]'
assert pi.dtype_str == str(pi.dtype)
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._ndarray_values, exp)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'dayofyear',
'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
s = pd.Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert len(periodindex) == len(field_idx)
for x, val in zip(periods, field_idx):
assert getattr(x, field) == val
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
assert len(periodindex) == len(field_s)
for x, val in zip(periods, field_s):
assert getattr(x, field) == val
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
assert index.is_(index)
assert not index.is_(create_index())
assert index.is_(index.view())
assert index.is_(index.view().view().view().view().view())
assert index.view().is_(index)
ind2 = index.view()
index.name = "Apple"
assert ind2.is_(index)
assert not index.is_(index[:])
assert not index.is_(index.asfreq('M'))
assert not index.is_(index.asfreq('A'))
assert not index.is_(index - 2)
assert not index.is_(index - 0)
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
assert Period('2007-01', freq='M') in rng
assert not Period('2007-01', freq='D') in rng
assert not Period('2007-01', freq='2M') in rng
def test_contains_nat(self):
# see gh-13582
idx = period_range('2007-01', freq='M', periods=10)
assert pd.NaT not in idx
assert None not in idx
assert float('nan') not in idx
assert np.nan not in idx
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert pd.NaT in idx
assert None in idx
assert float('nan') in idx
assert np.nan in idx
def test_periods_number_check(self):
with pytest.raises(ValueError):
period_range('2011-1-1', '2012-1-1', 'B')
def test_start_time(self):
# GH 17157
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS')
tm.assert_index_equal(index.start_time, expected_index)
def test_end_time(self):
# GH 17157
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='M')
expected_index = expected_index.shift(1, freq='D').shift(-1, freq='ns')
tm.assert_index_equal(index.end_time, expected_index)
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
tm.assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN',
tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN',
tz='US/Eastern')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
def test_shift(self):
# This is tested in test_arithmetic
pass
@td.skip_if_32bit
def test_ndarray_compat_properties(self):
super(TestPeriodIndex, self).test_ndarray_compat_properties()
def test_negative_ordinals(self):
Period(ordinal=-1000, freq='A')
Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_index_equal(idx1, idx2)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2012-03', '2012-04'], freq='D', name='name')
exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name='name')
tm.assert_index_equal(idx.year, exp)
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name='name')
tm.assert_index_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
assert s['05Q4'] == s[2]
def test_numpy_repeat(self):
index = period_range('20010101', periods=2)
expected = PeriodIndex([Period('2001-01-01'), Period('2001-01-01'),
Period('2001-01-02'), Period('2001-01-02')])
tm.assert_index_equal(np.repeat(index, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, index, 2, axis=1)
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
assert isinstance(result[0], Period)
assert result[0].freq == index.freq
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq='A')
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq='A')
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq='A')
pytest.raises(ValueError, getattr, index, 'is_full')
assert index[:0].is_full
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
assert isinstance(s.index.levels[0], PeriodIndex)
assert isinstance(s.index.values[0][0], Period)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
assert isinstance(result, PeriodIndex)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = tm.round_trip_pickle(prng)
assert new_prng.freq == offsets.MonthEnd()
assert new_prng.freqstr == 'M'
def test_map(self):
# test_map_dictlike generally tests
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x.ordinal)
exp = Index([x.ordinal for x in index])
tm.assert_index_equal(result, exp)
def test_join_self(self, join_type):
index = period_range('1/1/2000', periods=10)
joined = index.join(index, how=join_type)
assert index is joined
def test_insert(self):
# GH 18295 (test missing)
expected = PeriodIndex(
['2017Q1', pd.NaT, '2017Q2', '2017Q3', '2017Q4'], freq='Q')
for na in (np.nan, pd.NaT, None):
result = period_range('2017Q1', periods=4, freq='Q').insert(1, na)
tm.assert_index_equal(result, expected)
|
bsd-3-clause
|
ycaihua/scikit-learn
|
examples/applications/wikipedia_principal_eigenvector.py
|
41
|
7742
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
import urllib
print("Downloading data from '%s', please wait..." % url)
opener = urllib.urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
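# --- Hedged illustration (editorial addition, not part of the original example) ---
# The transitive-closure step above turns chained redirects into direct ones:
# with {'A': 'B', 'B': 'C'} both A and B end up pointing at C. A standalone
# sketch of that rule on a plain dict (the helper name is an assumption):
def _resolve_redirects(redirects):
    resolved = {}
    for source in redirects:
        target, seen = redirects[source], {source}
        while target in redirects and redirects[target] not in seen:
            seen.add(target)
            target = redirects[target]
        resolved[source] = target
    return resolved  # _resolve_redirects({'A': 'B', 'B': 'C'}) == {'A': 'C', 'B': 'C'}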
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
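# --- Hedged toy check (editorial addition, not part of the original example) ---
# A tiny sanity sketch of centrality_scores on a hand-built 3-node graph where
# node 0 links to 1 and 2 and node 1 links to 2, so node 2 should receive the
# highest score. The dense toy matrix and helper name are assumptions made for
# illustration only.
def _example_toy_centrality():
    A = sparse.csr_matrix(np.array([[0., 1., 1.],
                                    [0., 0., 1.],
                                    [0., 0., 0.]], dtype=np.float32))
    return centrality_scores(A, max_iter=50)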
|
bsd-3-clause
|
Superchicken1/SambaFlow
|
python/traffic-prediction/src/models/NN_currentSituationWithWeather.py
|
1
|
3831
|
import tensorflow as tf
import numpy as np
import pandas as pd
import src.vector_gen.generateCurrentSituationWithWeather as vecX
import src.vector_gen.generate_VectorY as vecY
import src.misc.split_train_valid_notRandom as split
import src.misc.evaluation as evaluation
import src.misc.paths as paths
df_trajectories = pd.read_csv(paths.trajectories_training_file)
df_weather = pd.read_csv(paths.weather_training_file)
# Create dataset
df_X = vecX.generate_x(df_trajectories, df_weather)
df_Y = vecY.generate_VectorY_df(df_trajectories)
# Split data into training, validation and testing sets
split.split_dataset(df_X, df_Y)
training_Y = pd.read_csv("../../../../python/traffic-prediction/src/misc/splitting_csv_files/train_Y.csv", index_col =0)
validation_Y = pd.read_csv("../../../../python/traffic-prediction/src/misc/splitting_csv_files/valid_Y.csv", index_col =0)
testing_Y = pd.read_csv("../../../../python/traffic-prediction/src/misc/splitting_csv_files/test_Y.csv", index_col =0)
training_X = pd.read_csv("../../../../python/traffic-prediction/src/misc/splitting_csv_files/train_X.csv", index_col =0)
validation_X = pd.read_csv("../../../../python/traffic-prediction/src/misc/splitting_csv_files/valid_X.csv", index_col =0)
testing_X = pd.read_csv("../../../../python/traffic-prediction/src/misc/splitting_csv_files/test_X.csv", index_col =0)
#-------------------------------------------------------------------------------------------------------------------------
# Tensorflow - linear regression
def feature_normalize(train_X):
global mean, std
mean = np.mean(train_X, axis=0)
std = np.std(train_X, axis=0)
return abs((train_X - mean) / std)
def run_regression(X_train, Y_train, X_test, Y_test, lambda_value = 0.1, normalize=False, batch_size=10, alpha=1e-8):
x_train = feature_normalize(X_train) if normalize else X_train
y_train = Y_train
x_test = X_test
y_test = Y_test
session = tf.Session()
number_rows = training_X.shape[0]
number_col_x = training_X.shape[1]
number_col_y = training_Y.shape[1]
X = tf.placeholder('float', [None, number_col_x], name="X")
Y = tf.placeholder('float', [None, number_col_y], name="Y")
theta = tf.Variable(tf.random_normal([number_col_x, number_col_y], stddev=0.01), name="Theta")
lambda_val = tf.constant(lambda_value)
y_predicted = tf.matmul(X, theta)
with tf.name_scope('cost') as scope:
cost_func = (tf.nn.l2_loss(y_predicted - Y) + lambda_val * tf.nn.l2_loss(theta))/float(batch_size)
cost_summary = tf.summary.scalar('cost', cost_func)
training_func = tf.train.GradientDescentOptimizer(alpha).minimize(cost_func)
with tf.name_scope("test") as scope:
correct_prediction = tf.subtract(tf.cast(1, 'float'), tf.reduce_mean(tf.subtract(y_predicted, Y)))
accuracy = tf.cast(correct_prediction, "float")
saver = tf.train.Saver()
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("/tmp", session.graph)
init = tf.global_variables_initializer()
session.run(init)
for i in range(1, int(len(x_train)/batch_size)):
session.run(training_func, feed_dict={X: x_train[i*batch_size:i*batch_size+batch_size], Y: y_train[i*batch_size:i*batch_size+batch_size]})
if i % batch_size == 0:
print("test accuracy %g"%session.run(accuracy, feed_dict={X: x_test, Y: y_test}))
print("final test accuracy %g"%session.run(accuracy, feed_dict={X: x_test, Y: y_test}))
prediction = y_predicted.eval(feed_dict={X: x_test}, session = session)
mape = evaluation.mape(prediction, y_test)
mean_mape = np.mean(np.array(mape))
print("MAPE: %g" % mean_mape)
session.close()
run_regression(training_X, training_Y, testing_X, testing_Y, normalize=False, lambda_value = 0.1, batch_size=10)
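# Hedged usage sketch (kept commented out so the script's runtime behaviour is
# unchanged): the same helper can be re-run with z-score normalization and a
# stronger ridge penalty, evaluated on the validation split loaded above.
# run_regression(training_X, training_Y, validation_X, validation_Y,
#                normalize=True, lambda_value=1.0, batch_size=10)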
|
apache-2.0
|
siutanwong/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
377
|
3555
|
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
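# To benchmark an additional classifier, add it to the ESTIMATORS dict above;
# it then becomes selectable through the -e/--estimators flag parsed below.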
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
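# Illustrative invocation (any subset of the ESTIMATORS keys defined above):
#   python bench_20newsgroups.py -e logistic_regression naive_bayes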
|
bsd-3-clause
|
huggingface/pytorch-transformers
|
examples/research_projects/rag/eval_rag.py
|
3
|
11101
|
""" Evaluation script for RAG models."""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
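# For example: a checkpoint path containing "token" maps to "rag_token", one
# containing "sequence" to "rag_sequence" and one containing "bart" to "bart";
# anything else returns None, in which case --model_type must be set explicitly.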
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
return max(metric_fn(prediction, gt) for gt in ground_truths)
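# e.g. metric_max_over_ground_truths(exact_match_score, prediction, gold_answers)
# scores a single prediction against every gold answer and keeps the best value.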
def get_scores(args, preds_path, gold_data_path):
hypos = [line.strip() for line in open(preds_path, "r").readlines()]
answers = []
if args.gold_data_mode == "qa":
data = pd.read_csv(gold_data_path, sep="\t", header=None)
for answer_list in data[1]:
ground_truths = ast.literal_eval(answer_list)
answers.append(ground_truths)
else:
references = [line.strip() for line in open(gold_data_path, "r").readlines()]
answers = [[reference] for reference in references]
f1 = em = total = 0
for prediction, ground_truths in zip(hypos, answers):
total += 1
em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
em = 100.0 * em / total
f1 = 100.0 * f1 / total
logger.info(f"F1: {f1:.2f}")
logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
k = args.k
hypos = [line.strip() for line in open(preds_path, "r").readlines()]
references = [line.strip() for line in open(gold_data_path, "r").readlines()]
em = total = 0
for hypo, reference in zip(hypos, references):
hypo_provenance = set(hypo.split("\t")[:k])
ref_provenance = set(reference.split("\t"))
total += 1
em += len(hypo_provenance & ref_provenance) / k
em = 100.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
def strip_title(title):
if title.startswith('"'):
title = title[1:]
if title.endswith('"'):
title = title[:-1]
return title
retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
questions,
return_tensors="pt",
padding=True,
truncation=True,
)["input_ids"].to(args.device)
question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
question_enc_pool_output = question_enc_outputs[0]
result = rag_model.retriever(
retriever_input_ids,
question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
prefix=rag_model.rag.generator.config.prefix,
n_docs=rag_model.config.n_docs,
return_tensors="pt",
)
all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
provenance_strings = []
for docs in all_docs:
provenance = [strip_title(title) for title in docs["title"]]
provenance_strings.append("\t".join(provenance))
return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
with torch.no_grad():
inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
questions, return_tensors="pt", padding=True, truncation=True
)
input_ids = inputs_dict.input_ids.to(args.device)
attention_mask = inputs_dict.attention_mask.to(args.device)
outputs = rag_model.generate( # rag_model overwrites generate
input_ids,
attention_mask=attention_mask,
num_beams=args.num_beams,
min_length=args.min_length,
max_length=args.max_length,
early_stopping=False,
num_return_sequences=1,
bad_words_ids=[[0, 0]], # BART likes to repeat BOS tokens, dont allow it to generate more than one
)
answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
if args.print_predictions:
for q, a in zip(questions, answers):
logger.info("Q: {} - A: {}".format(q, a))
return answers
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token", "bart"],
type=str,
help="RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the model_name_or_path",
)
parser.add_argument(
"--index_name",
default=None,
choices=["exact", "compressed", "legacy"],
type=str,
help="RAG model retriever type",
)
parser.add_argument(
"--index_path",
default=None,
type=str,
help="Path to the retrieval index",
)
parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
)
parser.add_argument(
"--eval_mode",
choices=["e2e", "retrieval"],
default="e2e",
type=str,
help="Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates precision@k.",
)
parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
parser.add_argument(
"--evaluation_set",
default=None,
type=str,
required=True,
help="Path to a file containing evaluation samples",
)
parser.add_argument(
"--gold_data_path",
default=None,
type=str,
required=True,
help="Path to a tab-separated file with gold samples",
)
parser.add_argument(
"--gold_data_mode",
default="qa",
type=str,
choices=["qa", "ans"],
help="Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string",
)
parser.add_argument(
"--predictions_path",
type=str,
default="predictions.txt",
help="Name of the predictions file, to be stored in the checkpoints directory",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument(
"--eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--recalculate",
help="Recalculate predictions even if the prediction file exists",
action="store_true",
)
parser.add_argument(
"--num_beams",
default=4,
type=int,
help="Number of beams to be used when generating answers",
)
parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
parser.add_argument(
"--print_predictions",
action="store_true",
help="If True, prints predictions while evaluating.",
)
parser.add_argument(
"--print_docs",
action="store_true",
help="If True, prints docs retried while generating.",
)
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
return args
def main(args):
model_kwargs = {}
if args.model_type is None:
args.model_type = infer_model_type(args.model_name_or_path)
assert args.model_type is not None
if args.model_type.startswith("rag"):
model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
model_kwargs["n_docs"] = args.n_docs
if args.index_name is not None:
model_kwargs["index_name"] = args.index_name
if args.index_path is not None:
model_kwargs["index_path"] = args.index_path
else:
model_class = BartForConditionalGeneration
checkpoints = (
[f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
score_fn(args, args.predictions_path, args.gold_data_path)
continue
logger.info("***** Running evaluation for {} *****".format(checkpoint))
logger.info(" Batch size = %d", args.eval_batch_size)
logger.info(" Predictions will be stored under {}".format(args.predictions_path))
if args.model_type.startswith("rag"):
retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
model.retriever.init_retrieval()
else:
model = model_class.from_pretrained(checkpoint, **model_kwargs)
model.to(args.device)
with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
questions = []
for line in tqdm(eval_file):
questions.append(line.strip())
if len(questions) == args.eval_batch_size:
answers = evaluate_batch_fn(args, model, questions)
preds_file.write("\n".join(answers) + "\n")
preds_file.flush()
questions = []
if len(questions) > 0:
answers = evaluate_batch_fn(args, model, questions)
preds_file.write("\n".join(answers))
preds_file.flush()
score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
args = get_args()
main(args)
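# Illustrative command line (all paths below are placeholders, not real files):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data.csv \
#       --gold_data_mode qa \
#       --predictions_path path/to/predictions.txt \
#       --eval_mode e2e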
|
apache-2.0
|
cbuntain/UMD_HCIL_TREC2015
|
src/main/python/topicFilter/debugger.py
|
1
|
2837
|
#!/usr/bin/python
import codecs
import json
import re
import sys
import time
from nltk.stem import WordNetLemmatizer
import pandas as pd
if ( len(sys.argv) < 4 ):
print "Usage: %s <trec_topics.json> <sparkTrecOutput.csv> <output_file.csv>" % (sys.argv[0])
exit(1)
topicsFilePath = sys.argv[1]
sparkCsvFilePath = sys.argv[2]
outputPath = sys.argv[3]
topicsJsonObj = None
with codecs.open(topicsFilePath, "r", "utf-8") as f:
topicsJsonObj = json.load(f)
wordToTopicMap = {}
topicTimeMap = {}
for topic in topicsJsonObj:
topicTitle = topic["title"]
topicNum = topic["num"]
tokens = topic["tokens"]
for token in tokens:
if ( token not in wordToTopicMap ):
wordToTopicMap[token] = [(topicNum,topicTitle)]
else:
wordToTopicMap[token].append((topicNum,topicTitle))
topicTimeMap[topicNum] = {}
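# Resulting lookup structures:
#   wordToTopicMap: token -> list of (topicNum, topicTitle) pairs whose topic mentions it
#   topicTimeMap:   topicNum -> {(year, month, day): [tweet ids already kept for that day]}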
wnl = WordNetLemmatizer()
specCharRegex = re.compile(r"[^a-zA-Z0-9\s]")  # keep alphanumerics and whitespace
outputRows = []
with codecs.open(sparkCsvFilePath, "r", "utf-8") as f:
df = pd.read_csv(sparkCsvFilePath, header=None)
for (id, row) in df.iterrows():
topicNums = row[0]
captureTime = row[1]
tweetId = row[2]
tweetText = row[3]
gmTime = time.gmtime(captureTime)
timeTuple = (gmTime.tm_year, gmTime.tm_mon, gmTime.tm_mday)
timeStr = "%d-%d-%d" % (gmTime.tm_year, gmTime.tm_mon, gmTime.tm_mday)
cleanTokens = specCharRegex.sub(" ", tweetText.lower(), count=0)
tokens = set([wnl.lemmatize(x) for x in cleanTokens.split(" ")])
localTopicCountMap = {}
localTopics = []
for token in tokens:
if ( token in wordToTopicMap ):
localTopics.extend(wordToTopicMap[token])
for x in wordToTopicMap[token]:
thisTopicNum = x[0]
if ( thisTopicNum not in localTopicCountMap ):
localTopicCountMap[thisTopicNum] = 1
else:
localTopicCountMap[thisTopicNum] += 1
for localTopic in localTopics:
if ( localTopicCountMap[localTopic[0]] < 2 ):
continue
if ( timeTuple in topicTimeMap[localTopic[0]] and len(topicTimeMap[localTopic[0]][timeTuple]) > 10 ):
continue
if ( timeTuple not in topicTimeMap[localTopic[0]] ):
topicTimeMap[localTopic[0]][timeTuple] = [tweetId]
else:
topicTimeMap[localTopic[0]][timeTuple].append(tweetId)
item = {"topic":localTopic[0], "title": localTopic[1], "time":captureTime, "date":timeStr, "id":tweetId, "text":tweetText}
outputRows.append(item)
outputDf = pd.DataFrame(outputRows)
outputDf.to_csv(outputPath, columns=["topic", "title", "time", "date", "id", "text"], index=False)
|
apache-2.0
|
smartscheduling/scikit-learn-categorical-tree
|
sklearn/neighbors/tests/test_approximate.py
|
142
|
18692
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
|
bsd-3-clause
|
stuartarchibald/numba
|
numba/tests/test_extending.py
|
3
|
57247
|
import math
import operator
import sys
import pickle
import multiprocessing
import ctypes
import warnings
from distutils.version import LooseVersion
import re
import numpy as np
from numba import njit, jit, vectorize, guvectorize, objmode
from numba.core import types, errors, typing, compiler, cgutils
from numba.core.typed_passes import type_inference_stage
from numba.core.registry import cpu_target
from numba.core.compiler import compile_isolated
from numba.tests.support import (
TestCase,
captured_stdout,
temp_directory,
override_config,
run_in_new_process_in_cache_dir,
skip_if_typeguard,
)
from numba.core.errors import LoweringError
import unittest
from numba.extending import (
typeof_impl,
type_callable,
lower_builtin,
lower_cast,
overload,
overload_attribute,
overload_method,
models,
register_model,
box,
unbox,
NativeValue,
intrinsic,
_Intrinsic,
register_jitable,
get_cython_function_address,
is_jitted,
overload_classmethod,
)
from numba.core.typing.templates import (
ConcreteTemplate,
signature,
infer,
infer_global,
AbstractTemplate,
)
# Pandas-like API implementation
from .pdlike_usecase import Index, Series
try:
import scipy
if LooseVersion(scipy.__version__) < "0.19":
sc = None
else:
import scipy.special.cython_special as sc
except ImportError:
sc = None
# -----------------------------------------------------------------------
# Define a custom type and an implicit cast on it
class MyDummy(object):
pass
class MyDummyType(types.Opaque):
def can_convert_to(self, context, toty):
if isinstance(toty, types.Number):
from numba.core.typeconv import Conversion
return Conversion.safe
mydummy_type = MyDummyType("mydummy")
mydummy = MyDummy()
@typeof_impl.register(MyDummy)
def typeof_mydummy(val, c):
return mydummy_type
@lower_cast(MyDummyType, types.Number)
def mydummy_to_number(context, builder, fromty, toty, val):
"""
    Implicit conversion from MyDummy to a Number type (lowered to the constant 42).
"""
return context.get_constant(toty, 42)
def get_dummy():
return mydummy
register_model(MyDummyType)(models.OpaqueModel)
@unbox(MyDummyType)
def unbox_index(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a second custom type but w/o implicit cast to Number
def base_dummy_type_factory(name):
class DynType(object):
pass
class DynTypeType(types.Opaque):
pass
dyn_type_type = DynTypeType(name)
@typeof_impl.register(DynType)
def typeof_mydummy(val, c):
return dyn_type_type
register_model(DynTypeType)(models.OpaqueModel)
return DynTypeType, DynType, dyn_type_type
MyDummyType2, MyDummy2, mydummy_type_2 = base_dummy_type_factory("mydummy2")
@unbox(MyDummyType2)
def unbox_index2(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
# -----------------------------------------------------------------------
# Define a function's typing and implementation using the classical
# two-step API
def func1(x=None):
raise NotImplementedError
def type_func1_(context):
def typer(x=None):
if x in (None, types.none):
# 0-arg or 1-arg with None
return types.int32
elif isinstance(x, types.Float):
# 1-arg with float
return x
return typer
type_func1 = type_callable(func1)(type_func1_)
@lower_builtin(func1)
@lower_builtin(func1, types.none)
def func1_nullary(context, builder, sig, args):
return context.get_constant(sig.return_type, 42)
@lower_builtin(func1, types.Float)
def func1_unary(context, builder, sig, args):
def func1_impl(x):
return math.sqrt(2 * x)
return context.compile_internal(builder, func1_impl, sig, args)
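# With the typing function (type_func1_) and the two lowerings above, func1()
# resolves to int32 (the constant 42) for the zero-argument / None case and to
# the input float type (sqrt(2 * x)) for a float argument; both paths are
# exercised by TestLowLevelExtending.test_func1 further below.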
# We can do the same for a known internal operation, here "print_item"
# which we extend to support MyDummyType.
@infer
class PrintDummy(ConcreteTemplate):
key = "print_item"
cases = [signature(types.none, mydummy_type)]
@lower_builtin("print_item", MyDummyType)
def print_dummy(context, builder, sig, args):
[x] = args
pyapi = context.get_python_api(builder)
strobj = pyapi.unserialize(pyapi.serialize_object("hello!"))
pyapi.print_object(strobj)
pyapi.decref(strobj)
return context.get_dummy_value()
# -----------------------------------------------------------------------
# Define an overloaded function (combined API)
def where(cond, x, y):
raise NotImplementedError
def np_where(cond, x, y):
"""
Wrap np.where() to allow for keyword arguments
"""
return np.where(cond, x, y)
def call_where(cond, x, y):
return where(cond, y=y, x=x)
@overload(where)
def overload_where_arrays(cond, x, y):
"""
Implement where() for arrays.
"""
# Choose implementation based on argument types.
if isinstance(cond, types.Array):
if x.dtype != y.dtype:
raise errors.TypingError("x and y should have the same dtype")
# Array where() => return an array of the same shape
if all(ty.layout == "C" for ty in (cond, x, y)):
def where_impl(cond, x, y):
"""
Fast implementation for C-contiguous arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
cf = cond.flat
xf = x.flat
yf = y.flat
rf = res.flat
for i in range(cond.size):
rf[i] = xf[i] if cf[i] else yf[i]
return res
else:
def where_impl(cond, x, y):
"""
Generic implementation for other arrays
"""
shape = cond.shape
if x.shape != shape or y.shape != shape:
raise ValueError("all inputs should have the same shape")
res = np.empty_like(x)
for idx, c in np.ndenumerate(cond):
res[idx] = x[idx] if c else y[idx]
return res
return where_impl
# We can define another overload function for the same function, they
# will be tried in turn until one succeeds.
@overload(where)
def overload_where_scalars(cond, x, y):
"""
Implement where() for scalars.
"""
if not isinstance(cond, types.Array):
if x != y:
raise errors.TypingError("x and y should have the same type")
def where_impl(cond, x, y):
"""
Scalar where() => return a 0-dim array
"""
scal = x if cond else y
# Can't use full_like() on Numpy < 1.8
arr = np.empty_like(scal)
arr[()] = scal
return arr
return where_impl
# -----------------------------------------------------------------------
# Overload an already defined built-in function, extending it for new types.
@overload(len)
def overload_len_dummy(arg):
if isinstance(arg, MyDummyType):
def len_impl(arg):
return 13
return len_impl
@overload(operator.add)
def overload_add_dummy(arg1, arg2):
if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
arg2, (MyDummyType, MyDummyType2)
):
def dummy_add_impl(arg1, arg2):
return 42
return dummy_add_impl
@overload(operator.delitem)
def overload_dummy_delitem(obj, idx):
if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
def dummy_delitem_impl(obj, idx):
print("del", obj, idx)
return dummy_delitem_impl
@overload(operator.getitem)
def overload_dummy_getitem(obj, idx):
if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer):
def dummy_getitem_impl(obj, idx):
return idx + 123
return dummy_getitem_impl
@overload(operator.setitem)
def overload_dummy_setitem(obj, idx, val):
if all(
[
isinstance(obj, MyDummyType),
isinstance(idx, types.Integer),
isinstance(val, types.Integer),
]
):
def dummy_setitem_impl(obj, idx, val):
print(idx, val)
return dummy_setitem_impl
def call_add_operator(arg1, arg2):
return operator.add(arg1, arg2)
def call_add_binop(arg1, arg2):
return arg1 + arg2
@overload(operator.iadd)
def overload_iadd_dummy(arg1, arg2):
if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance(
arg2, (MyDummyType, MyDummyType2)
):
def dummy_iadd_impl(arg1, arg2):
return 42
return dummy_iadd_impl
def call_iadd_operator(arg1, arg2):
return operator.add(arg1, arg2)
def call_iadd_binop(arg1, arg2):
arg1 += arg2
return arg1
def call_delitem(obj, idx):
del obj[idx]
def call_getitem(obj, idx):
return obj[idx]
def call_setitem(obj, idx, val):
obj[idx] = val
@overload_method(MyDummyType, "length")
def overload_method_length(arg):
def imp(arg):
return len(arg)
return imp
def cache_overload_method_usecase(x):
return x.length()
def call_func1_nullary():
return func1()
def call_func1_unary(x):
return func1(x)
def len_usecase(x):
return len(x)
def print_usecase(x):
print(x)
def getitem_usecase(x, key):
return x[key]
def npyufunc_usecase(x):
return np.cos(np.sin(x))
def get_data_usecase(x):
return x._data
def get_index_usecase(x):
return x._index
def is_monotonic_usecase(x):
return x.is_monotonic_increasing
def make_series_usecase(data, index):
return Series(data, index)
def clip_usecase(x, lo, hi):
return x.clip(lo, hi)
# -----------------------------------------------------------------------
def return_non_boxable():
return np
@overload(return_non_boxable)
def overload_return_non_boxable():
def imp():
return np
return imp
def non_boxable_ok_usecase(sz):
mod = return_non_boxable()
return mod.arange(sz)
def non_boxable_bad_usecase():
return return_non_boxable()
def mk_func_input(f):
pass
@infer_global(mk_func_input)
class MkFuncTyping(AbstractTemplate):
def generic(self, args, kws):
assert isinstance(args[0], types.MakeFunctionLiteral)
return signature(types.none, *args)
def mk_func_test_impl():
mk_func_input(lambda a: a)
# -----------------------------------------------------------------------
@overload(np.exp)
def overload_np_exp(obj):
if isinstance(obj, MyDummyType):
def imp(obj):
# Returns a constant if a MyDummyType is seen
return 0xDEADBEEF
return imp
class TestLowLevelExtending(TestCase):
"""
Test the low-level two-tier extension API.
"""
# We check with both @jit and compile_isolated(), to exercise the
# registration logic.
def test_func1(self):
pyfunc = call_func1_nullary
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(), 42)
pyfunc = call_func1_unary
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(None), 42)
self.assertPreciseEqual(cfunc(18.0), 6.0)
def test_func1_isolated(self):
pyfunc = call_func1_nullary
cr = compile_isolated(pyfunc, ())
self.assertPreciseEqual(cr.entry_point(), 42)
pyfunc = call_func1_unary
cr = compile_isolated(pyfunc, (types.float64,))
self.assertPreciseEqual(cr.entry_point(18.0), 6.0)
def test_type_callable_keeps_function(self):
self.assertIs(type_func1, type_func1_)
self.assertIsNotNone(type_func1)
def test_cast_mydummy(self):
pyfunc = get_dummy
cr = compile_isolated(pyfunc, (), types.float64)
self.assertPreciseEqual(cr.entry_point(), 42.0)
def test_mk_func_literal(self):
"""make sure make_function is passed to typer class as a literal
"""
test_ir = compiler.run_frontend(mk_func_test_impl)
typingctx = cpu_target.typing_context
targetctx = cpu_target.target_context
typingctx.refresh()
targetctx.refresh()
typing_res = type_inference_stage(typingctx, targetctx, test_ir, (),
None)
self.assertTrue(
any(
isinstance(a, types.MakeFunctionLiteral)
for a in typing_res.typemap.values()
)
)
class TestPandasLike(TestCase):
"""
Test implementing a pandas-like Index object.
Also stresses most of the high-level API.
"""
def test_index_len(self):
i = Index(np.arange(3))
cfunc = jit(nopython=True)(len_usecase)
self.assertPreciseEqual(cfunc(i), 3)
def test_index_getitem(self):
i = Index(np.int32([42, 8, -5]))
cfunc = jit(nopython=True)(getitem_usecase)
self.assertPreciseEqual(cfunc(i, 1), 8)
ii = cfunc(i, slice(1, None))
self.assertIsInstance(ii, Index)
self.assertEqual(list(ii), [8, -5])
def test_index_ufunc(self):
"""
Check Numpy ufunc on an Index object.
"""
i = Index(np.int32([42, 8, -5]))
cfunc = jit(nopython=True)(npyufunc_usecase)
ii = cfunc(i)
self.assertIsInstance(ii, Index)
self.assertPreciseEqual(ii._data, np.cos(np.sin(i._data)))
def test_index_get_data(self):
# The _data attribute is exposed with make_attribute_wrapper()
i = Index(np.int32([42, 8, -5]))
cfunc = jit(nopython=True)(get_data_usecase)
data = cfunc(i)
self.assertIs(data, i._data)
def test_index_is_monotonic(self):
# The is_monotonic_increasing attribute is exposed with
# overload_attribute()
cfunc = jit(nopython=True)(is_monotonic_usecase)
for values, expected in [
([8, 42, 5], False),
([5, 8, 42], True),
([], True),
]:
i = Index(np.int32(values))
got = cfunc(i)
self.assertEqual(got, expected)
def test_series_len(self):
i = Index(np.int32([2, 4, 3]))
s = Series(np.float64([1.5, 4.0, 2.5]), i)
cfunc = jit(nopython=True)(len_usecase)
self.assertPreciseEqual(cfunc(s), 3)
def test_series_get_index(self):
i = Index(np.int32([2, 4, 3]))
s = Series(np.float64([1.5, 4.0, 2.5]), i)
cfunc = jit(nopython=True)(get_index_usecase)
got = cfunc(s)
self.assertIsInstance(got, Index)
self.assertIs(got._data, i._data)
def test_series_ufunc(self):
"""
        Check Numpy ufunc on a Series object.
"""
i = Index(np.int32([42, 8, -5]))
s = Series(np.int64([1, 2, 3]), i)
cfunc = jit(nopython=True)(npyufunc_usecase)
ss = cfunc(s)
self.assertIsInstance(ss, Series)
self.assertIsInstance(ss._index, Index)
self.assertIs(ss._index._data, i._data)
self.assertPreciseEqual(ss._values, np.cos(np.sin(s._values)))
def test_series_constructor(self):
i = Index(np.int32([42, 8, -5]))
d = np.float64([1.5, 4.0, 2.5])
cfunc = jit(nopython=True)(make_series_usecase)
got = cfunc(d, i)
self.assertIsInstance(got, Series)
self.assertIsInstance(got._index, Index)
self.assertIs(got._index._data, i._data)
self.assertIs(got._values, d)
def test_series_clip(self):
i = Index(np.int32([42, 8, -5]))
s = Series(np.float64([1.5, 4.0, 2.5]), i)
cfunc = jit(nopython=True)(clip_usecase)
ss = cfunc(s, 1.6, 3.0)
self.assertIsInstance(ss, Series)
self.assertIsInstance(ss._index, Index)
self.assertIs(ss._index._data, i._data)
self.assertPreciseEqual(ss._values, np.float64([1.6, 3.0, 2.5]))
class TestHighLevelExtending(TestCase):
"""
Test the high-level combined API.
"""
def test_where(self):
"""
Test implementing a function with @overload.
"""
pyfunc = call_where
cfunc = jit(nopython=True)(pyfunc)
def check(*args, **kwargs):
expected = np_where(*args, **kwargs)
got = cfunc(*args, **kwargs)
self.assertPreciseEqual(expected, got)
check(x=3, cond=True, y=8)
check(True, 3, 8)
check(
np.bool_([True, False, True]),
np.int32([1, 2, 3]),
np.int32([4, 5, 5]),
)
# The typing error is propagated
with self.assertRaises(errors.TypingError) as raises:
cfunc(np.bool_([]), np.int32([]), np.int64([]))
self.assertIn(
"x and y should have the same dtype", str(raises.exception)
)
def test_len(self):
"""
Test re-implementing len() for a custom type with @overload.
"""
cfunc = jit(nopython=True)(len_usecase)
self.assertPreciseEqual(cfunc(MyDummy()), 13)
self.assertPreciseEqual(cfunc([4, 5]), 2)
def test_print(self):
"""
Test re-implementing print() for a custom type with @overload.
"""
cfunc = jit(nopython=True)(print_usecase)
with captured_stdout():
cfunc(MyDummy())
self.assertEqual(sys.stdout.getvalue(), "hello!\n")
def test_add_operator(self):
"""
Test re-implementing operator.add() for a custom type with @overload.
"""
pyfunc = call_add_operator
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_add_binop(self):
"""
Test re-implementing '+' for a custom type via @overload(operator.add).
"""
pyfunc = call_add_binop
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_iadd_operator(self):
"""
Test re-implementing operator.add() for a custom type with @overload.
"""
pyfunc = call_iadd_operator
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_iadd_binop(self):
"""
Test re-implementing '+' for a custom type via @overload(operator.add).
"""
pyfunc = call_iadd_binop
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(1, 2), 3)
self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42)
# this will call add(Number, Number) as MyDummy implicitly casts to
# Number
self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84)
def test_delitem(self):
pyfunc = call_delitem
cfunc = jit(nopython=True)(pyfunc)
obj = MyDummy()
e = None
with captured_stdout() as out:
try:
cfunc(obj, 321)
except Exception as exc:
e = exc
if e is not None:
raise e
self.assertEqual(out.getvalue(), "del hello! 321\n")
def test_getitem(self):
pyfunc = call_getitem
cfunc = jit(nopython=True)(pyfunc)
self.assertPreciseEqual(cfunc(MyDummy(), 321), 321 + 123)
def test_setitem(self):
pyfunc = call_setitem
cfunc = jit(nopython=True)(pyfunc)
obj = MyDummy()
e = None
with captured_stdout() as out:
try:
cfunc(obj, 321, 123)
except Exception as exc:
e = exc
if e is not None:
raise e
self.assertEqual(out.getvalue(), "321 123\n")
def test_no_cpython_wrapper(self):
"""
Test overloading whose return value cannot be represented in CPython.
"""
# Test passing Module type from a @overload implementation to ensure
# that the *no_cpython_wrapper* flag works
ok_cfunc = jit(nopython=True)(non_boxable_ok_usecase)
n = 10
got = ok_cfunc(n)
expect = non_boxable_ok_usecase(n)
np.testing.assert_equal(expect, got)
# Verify that the Module type cannot be returned to CPython
bad_cfunc = jit(nopython=True)(non_boxable_bad_usecase)
with self.assertRaises(TypeError) as raises:
bad_cfunc()
errmsg = str(raises.exception)
expectmsg = "cannot convert native Module"
self.assertIn(expectmsg, errmsg)
def test_typing_vs_impl_signature_mismatch_handling(self):
"""
Tests that an overload which has a differing typing and implementing
signature raises an exception.
"""
def gen_ol(impl=None):
def myoverload(a, b, c, kw=None):
pass
@overload(myoverload)
def _myoverload_impl(a, b, c, kw=None):
return impl
@jit(nopython=True)
def foo(a, b, c, d):
myoverload(a, b, c, kw=d)
return foo
sentinel = "Typing and implementation arguments differ in"
# kwarg value is different
def impl1(a, b, c, kw=12):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl1)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument default values", msg)
self.assertIn('<Parameter "kw=12">', msg)
self.assertIn('<Parameter "kw=None">', msg)
# kwarg name is different
def impl2(a, b, c, kwarg=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl2)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument names", msg)
self.assertIn('<Parameter "kwarg=None">', msg)
self.assertIn('<Parameter "kw=None">', msg)
# arg name is different
def impl3(z, b, c, kw=None):
if a > 10: # noqa: F821
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl3)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "a">', msg)
self.assertIn('<Parameter "z">', msg)
from .overload_usecases import impl4, impl5
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl4)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn("First difference: 'z'", msg)
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl5)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "a">', msg)
self.assertIn('<Parameter "z">', msg)
# too many args
def impl6(a, b, c, d, e, kw=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl6)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "d">', msg)
self.assertIn('<Parameter "e">', msg)
# too few args
def impl7(a, b, kw=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl7)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("argument names", msg)
self.assertFalse("keyword" in msg)
self.assertIn('<Parameter "c">', msg)
# too many kwargs
def impl8(a, b, c, kw=None, extra_kwarg=None):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl8)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument names", msg)
self.assertIn('<Parameter "extra_kwarg=None">', msg)
# too few kwargs
def impl9(a, b, c):
if a > 10:
return 1
else:
return -1
with self.assertRaises(errors.TypingError) as e:
gen_ol(impl9)(1, 2, 3, 4)
msg = str(e.exception)
self.assertIn(sentinel, msg)
self.assertIn("keyword argument names", msg)
self.assertIn('<Parameter "kw=None">', msg)
def test_typing_vs_impl_signature_mismatch_handling_var_positional(self):
"""
Tests that an overload which has a differing typing and implementing
signature raises an exception and uses VAR_POSITIONAL (*args) in typing
"""
def myoverload(a, kw=None):
pass
from .overload_usecases import var_positional_impl
overload(myoverload)(var_positional_impl)
@jit(nopython=True)
def foo(a, b):
return myoverload(a, b, 9, kw=11)
with self.assertRaises(errors.TypingError) as e:
foo(1, 5)
msg = str(e.exception)
self.assertIn("VAR_POSITIONAL (e.g. *args) argument kind", msg)
self.assertIn("offending argument name is '*star_args_token'", msg)
def test_typing_vs_impl_signature_mismatch_handling_var_keyword(self):
"""
        Tests that an overload which uses **kwargs (VAR_KEYWORD) in either its
        typing or its implementing signature raises an exception.
"""
def gen_ol(impl, strict=True):
def myoverload(a, kw=None):
pass
overload(myoverload, strict=strict)(impl)
@jit(nopython=True)
def foo(a, b):
return myoverload(a, kw=11)
return foo
# **kwargs in typing
def ol1(a, **kws):
def impl(a, kw=10):
return a
return impl
gen_ol(ol1, False)(1, 2) # no error if strictness not enforced
with self.assertRaises(errors.TypingError) as e:
gen_ol(ol1)(1, 2)
msg = str(e.exception)
self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg)
self.assertIn("offending argument name is '**kws'", msg)
# **kwargs in implementation
def ol2(a, kw=0):
def impl(a, **kws):
return a
return impl
with self.assertRaises(errors.TypingError) as e:
gen_ol(ol2)(1, 2)
msg = str(e.exception)
self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg)
self.assertIn("offending argument name is '**kws'", msg)
def test_overload_method_kwargs(self):
# Issue #3489
@overload_method(types.Array, "foo")
def fooimpl(arr, a_kwarg=10):
def impl(arr, a_kwarg=10):
return a_kwarg
return impl
@njit
def bar(A):
return A.foo(), A.foo(20), A.foo(a_kwarg=30)
Z = np.arange(5)
self.assertEqual(bar(Z), (10, 20, 30))
def test_overload_method_literal_unpack(self):
# Issue #3683
@overload_method(types.Array, "litfoo")
def litfoo(arr, val):
# Must be an integer
if isinstance(val, types.Integer):
# Must not be literal
if not isinstance(val, types.Literal):
def impl(arr, val):
return val
return impl
@njit
def bar(A):
return A.litfoo(0xCAFE)
A = np.zeros(1)
bar(A)
self.assertEqual(bar(A), 0xCAFE)
def test_overload_ufunc(self):
# Issue #4133.
# Use an extended type (MyDummyType) to use with a customized
# ufunc (np.exp).
@njit
def test():
return np.exp(mydummy)
self.assertEqual(test(), 0xDEADBEEF)
def test_overload_method_stararg(self):
@overload_method(MyDummyType, "method_stararg")
def _ov_method_stararg(obj, val, val2, *args):
def get(obj, val, val2, *args):
return (val, val2, args)
return get
@njit
def foo(obj, *args):
# Test with expanding stararg
return obj.method_stararg(*args)
obj = MyDummy()
self.assertEqual(foo(obj, 1, 2), (1, 2, ()))
self.assertEqual(foo(obj, 1, 2, 3), (1, 2, (3,)))
self.assertEqual(foo(obj, 1, 2, 3, 4), (1, 2, (3, 4)))
@njit
def bar(obj):
# Test with explicit argument
return (
obj.method_stararg(1, 2),
obj.method_stararg(1, 2, 3),
obj.method_stararg(1, 2, 3, 4),
)
self.assertEqual(
bar(obj), ((1, 2, ()), (1, 2, (3,)), (1, 2, (3, 4))),
)
# Check cases that put tuple type into stararg
# NOTE: the expected result has an extra tuple because of stararg.
self.assertEqual(
foo(obj, 1, 2, (3,)), (1, 2, ((3,),)),
)
self.assertEqual(
foo(obj, 1, 2, (3, 4)), (1, 2, ((3, 4),)),
)
self.assertEqual(
foo(obj, 1, 2, (3, (4, 5))), (1, 2, ((3, (4, 5)),)),
)
def test_overload_classmethod(self):
# Add classmethod to a subclass of Array
class MyArray(types.Array):
pass
@overload_classmethod(MyArray, "array_alloc")
def ol_array_alloc(cls, nitems):
def impl(cls, nitems):
arr = np.arange(nitems)
return arr
return impl
@njit
def foo(nitems):
return MyArray.array_alloc(nitems)
nitems = 13
self.assertPreciseEqual(foo(nitems), np.arange(nitems))
# Check that the base type doesn't get the classmethod
@njit
def no_classmethod_in_base(nitems):
return types.Array.array_alloc(nitems)
with self.assertRaises(errors.TypingError) as raises:
no_classmethod_in_base(nitems)
self.assertIn(
"Unknown attribute 'array_alloc' of",
str(raises.exception),
)
def _assert_cache_stats(cfunc, expect_hit, expect_misses):
hit = cfunc._cache_hits[cfunc.signatures[0]]
if hit != expect_hit:
raise AssertionError("cache not used")
miss = cfunc._cache_misses[cfunc.signatures[0]]
if miss != expect_misses:
raise AssertionError("cache not used")
@skip_if_typeguard
class TestOverloadMethodCaching(TestCase):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
def test_caching_overload_method(self):
self._cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", self._cache_dir):
self.run_caching_overload_method()
def run_caching_overload_method(self):
cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase)
self.assertPreciseEqual(cfunc(MyDummy()), 13)
_assert_cache_stats(cfunc, 0, 1)
llvmir = cfunc.inspect_llvm((mydummy_type,))
# Ensure the inner method is not a declaration
decls = [
ln
for ln in llvmir.splitlines()
if ln.startswith("declare") and "overload_method_length" in ln
]
self.assertEqual(len(decls), 0)
# Test in a separate process
try:
ctx = multiprocessing.get_context("spawn")
except AttributeError:
ctx = multiprocessing
q = ctx.Queue()
p = ctx.Process(
target=run_caching_overload_method, args=(q, self._cache_dir)
)
p.start()
q.put(MyDummy())
p.join()
# Ensure subprocess exited normally
self.assertEqual(p.exitcode, 0)
res = q.get(timeout=1)
self.assertEqual(res, 13)
def run_caching_overload_method(q, cache_dir):
"""
Used by TestOverloadMethodCaching.test_caching_overload_method
"""
with override_config("CACHE_DIR", cache_dir):
arg = q.get()
cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase)
res = cfunc(arg)
q.put(res)
# Check cache stat
_assert_cache_stats(cfunc, 1, 0)
class TestIntrinsic(TestCase):
def test_void_return(self):
"""
Verify that returning a None from codegen function is handled
automatically for void functions, otherwise raise exception.
"""
@intrinsic
def void_func(typingctx, a):
sig = types.void(types.int32)
def codegen(context, builder, signature, args):
pass # do nothing, return None, should be turned into
# dummy value
return sig, codegen
@intrinsic
def non_void_func(typingctx, a):
sig = types.int32(types.int32)
def codegen(context, builder, signature, args):
pass # oops, should be returning a value here, raise exception
return sig, codegen
@jit(nopython=True)
def call_void_func():
void_func(1)
return 0
@jit(nopython=True)
def call_non_void_func():
non_void_func(1)
return 0
# void func should work
self.assertEqual(call_void_func(), 0)
# not void function should raise exception
with self.assertRaises(LoweringError) as e:
call_non_void_func()
self.assertIn("non-void function returns None", e.exception.msg)
def test_ll_pointer_cast(self):
"""
Usecase test: custom reinterpret cast to turn int values to pointers
"""
from ctypes import CFUNCTYPE, POINTER, c_float, c_int
# Use intrinsic to make a reinterpret_cast operation
def unsafe_caster(result_type):
assert isinstance(result_type, types.CPointer)
@intrinsic
def unsafe_cast(typingctx, src):
self.assertIsInstance(typingctx, typing.Context)
if isinstance(src, types.Integer):
sig = result_type(types.uintp)
# defines the custom code generation
def codegen(context, builder, signature, args):
[src] = args
rtype = signature.return_type
llrtype = context.get_value_type(rtype)
return builder.inttoptr(src, llrtype)
return sig, codegen
return unsafe_cast
# make a nopython function to use our cast op.
# this is not usable from cpython due to the returning of a pointer.
def unsafe_get_ctypes_pointer(src):
raise NotImplementedError("not callable from python")
@overload(unsafe_get_ctypes_pointer, strict=False)
def array_impl_unsafe_get_ctypes_pointer(arrtype):
if isinstance(arrtype, types.Array):
unsafe_cast = unsafe_caster(types.CPointer(arrtype.dtype))
def array_impl(arr):
return unsafe_cast(src=arr.ctypes.data)
return array_impl
# the ctype wrapped function for use in nopython mode
def my_c_fun_raw(ptr, n):
for i in range(n):
print(ptr[i])
prototype = CFUNCTYPE(None, POINTER(c_float), c_int)
my_c_fun = prototype(my_c_fun_raw)
# Call our pointer-cast in a @jit compiled function and use
# the pointer in a ctypes function
@jit(nopython=True)
def foo(arr):
ptr = unsafe_get_ctypes_pointer(arr)
my_c_fun(ptr, arr.size)
# Test
arr = np.arange(10, dtype=np.float32)
with captured_stdout() as buf:
foo(arr)
got = buf.getvalue().splitlines()
buf.close()
expect = list(map(str, arr))
self.assertEqual(expect, got)
def test_serialization(self):
"""
Test serialization of intrinsic objects
"""
# define a intrinsic
@intrinsic
def identity(context, x):
def codegen(context, builder, signature, args):
return args[0]
sig = x(x)
return sig, codegen
# use in a jit function
@jit(nopython=True)
def foo(x):
return identity(x)
self.assertEqual(foo(1), 1)
# get serialization memo
memo = _Intrinsic._memo
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
memo_size += 1
self.assertEqual(memo_size, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size, len(memo))
# check rebuilt foo
self.assertEqual(foo(1), foo_rebuilt(1))
# pickle identity directly
serialized_identity = pickle.dumps(identity)
# memo size unchanged
self.assertEqual(memo_size, len(memo))
# unpickle
identity_rebuilt = pickle.loads(serialized_identity)
# must be the same object
self.assertIs(identity, identity_rebuilt)
# memo size unchanged
self.assertEqual(memo_size, len(memo))
def test_deserialization(self):
"""
Test deserialization of intrinsic
"""
def defn(context, x):
def codegen(context, builder, signature, args):
return args[0]
return x(x), codegen
memo = _Intrinsic._memo
memo_size = len(memo)
# invoke _Intrinsic indirectly to avoid registration which keeps an
# internal reference inside the compiler
original = _Intrinsic("foo", defn)
self.assertIs(original._defn, defn)
pickled = pickle.dumps(original)
# by pickling, a new memo entry is created
memo_size += 1
self.assertEqual(memo_size, len(memo))
del original # remove original before unpickling
# by deleting, the memo entry is NOT removed due to recent
# function queue
self.assertEqual(memo_size, len(memo))
# Manually force clear of _recent queue
_Intrinsic._recent.clear()
memo_size -= 1
self.assertEqual(memo_size, len(memo))
rebuilt = pickle.loads(pickled)
# verify that the rebuilt object is different
self.assertIsNot(rebuilt._defn, defn)
# the second rebuilt object is the same as the first
second = pickle.loads(pickled)
self.assertIs(rebuilt._defn, second._defn)
def test_docstring(self):
@intrinsic
def void_func(typingctx, a: int):
"""void_func docstring"""
sig = types.void(types.int32)
def codegen(context, builder, signature, args):
pass # do nothing, return None, should be turned into
# dummy value
return sig, codegen
self.assertEqual("numba.tests.test_extending", void_func.__module__)
self.assertEqual("void_func", void_func.__name__)
self.assertEqual("TestIntrinsic.test_docstring.<locals>.void_func",
void_func.__qualname__)
self.assertDictEqual({'a': int}, void_func.__annotations__)
self.assertEqual("void_func docstring", void_func.__doc__)
class TestRegisterJitable(unittest.TestCase):
def test_no_flags(self):
@register_jitable
def foo(x, y):
return x + y
def bar(x, y):
return foo(x, y)
cbar = jit(nopython=True)(bar)
expect = bar(1, 2)
got = cbar(1, 2)
self.assertEqual(expect, got)
def test_flags_no_nrt(self):
@register_jitable(_nrt=False)
def foo(n):
return np.arange(n)
def bar(n):
return foo(n)
self.assertEqual(bar(3).tolist(), [0, 1, 2])
cbar = jit(nopython=True)(bar)
with self.assertRaises(errors.TypingError) as raises:
cbar(2)
msg = (
"Only accept returning of array passed into the function as "
"argument"
)
self.assertIn(msg, str(raises.exception))
class TestImportCythonFunction(unittest.TestCase):
@unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed")
def test_getting_function(self):
addr = get_cython_function_address(
"scipy.special.cython_special", "j0"
)
functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)
_j0 = functype(addr)
j0 = jit(nopython=True)(lambda x: _j0(x))
self.assertEqual(j0(0), 1)
def test_missing_module(self):
with self.assertRaises(ImportError) as raises:
get_cython_function_address("fakemodule", "fakefunction")
# The quotes are not there in Python 2
msg = "No module named '?fakemodule'?"
match = re.match(msg, str(raises.exception))
self.assertIsNotNone(match)
@unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed")
def test_missing_function(self):
with self.assertRaises(ValueError) as raises:
get_cython_function_address(
"scipy.special.cython_special", "foo"
)
msg = (
"No function 'foo' found in __pyx_capi__ of "
"'scipy.special.cython_special'"
)
self.assertEqual(msg, str(raises.exception))
@overload_method(
MyDummyType, "method_jit_option_check_nrt", jit_options={"_nrt": True}
)
def ov_method_jit_option_check_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
@overload_method(
MyDummyType, "method_jit_option_check_no_nrt", jit_options={"_nrt": False}
)
def ov_method_jit_option_check_no_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
@overload_attribute(
MyDummyType, "attr_jit_option_check_nrt", jit_options={"_nrt": True}
)
def ov_attr_jit_option_check_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
@overload_attribute(
MyDummyType, "attr_jit_option_check_no_nrt", jit_options={"_nrt": False}
)
def ov_attr_jit_option_check_no_nrt(obj):
def imp(obj):
return np.arange(10)
return imp
class TestJitOptionsNoNRT(TestCase):
# Test overload*(jit_options={...}) by turning off _nrt
def check_error_no_nrt(self, func, *args, **kwargs):
# Check that the compilation fails with a complaint about dynamic array
msg = (
"Only accept returning of array passed into "
"the function as argument"
)
with self.assertRaises(errors.TypingError) as raises:
func(*args, **kwargs)
self.assertIn(msg, str(raises.exception))
def no_nrt_overload_check(self, flag):
def dummy():
return np.arange(10)
@overload(dummy, jit_options={"_nrt": flag})
def ov_dummy():
def dummy():
return np.arange(10)
return dummy
@njit
def foo():
return dummy()
if flag:
self.assertPreciseEqual(foo(), np.arange(10))
else:
self.check_error_no_nrt(foo)
def test_overload_no_nrt(self):
self.no_nrt_overload_check(True)
self.no_nrt_overload_check(False)
def test_overload_method_no_nrt(self):
@njit
def udt(x):
return x.method_jit_option_check_nrt()
self.assertPreciseEqual(udt(mydummy), np.arange(10))
@njit
def udt(x):
return x.method_jit_option_check_no_nrt()
self.check_error_no_nrt(udt, mydummy)
def test_overload_attribute_no_nrt(self):
@njit
def udt(x):
return x.attr_jit_option_check_nrt
self.assertPreciseEqual(udt(mydummy), np.arange(10))
@njit
def udt(x):
return x.attr_jit_option_check_no_nrt
self.check_error_no_nrt(udt, mydummy)
class TestBoxingCallingJIT(TestCase):
def setUp(self):
super().setUp()
many = base_dummy_type_factory("mydummy2")
self.DynTypeType, self.DynType, self.dyn_type_type = many
self.dyn_type = self.DynType()
def test_unboxer_basic(self):
# Implements an unboxer on DynType that calls an intrinsic into the
# unboxer code.
magic_token = 0xCAFE
magic_offset = 123
@intrinsic
def my_intrinsic(typingctx, val):
# An intrinsic that returns `val + magic_offset`
def impl(context, builder, sig, args):
[val] = args
return builder.add(val, val.type(magic_offset))
sig = signature(val, val)
return sig, impl
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
# The unboxer that calls some jitcode
def bridge(x):
# proof that this is a jit'ed context by calling jit only
# intrinsic
return my_intrinsic(x)
args = [c.context.get_constant(types.intp, magic_token)]
sig = signature(types.voidptr, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return NativeValue(res, is_error=is_error)
@box(self.DynTypeType)
def boxer(typ, val, c):
# The boxer that returns an integer representation
res = c.builder.ptrtoint(val, cgutils.intp_t)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
out = passthru(self.dyn_type)
self.assertEqual(out, magic_token + magic_offset)
def test_unboxer_raise(self):
# Testing exception raising in jitcode called from unboxing.
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
# The unboxer that calls some jitcode
def bridge(x):
if x > 0:
raise ValueError("cannot be x > 0")
return x
args = [c.context.get_constant(types.intp, 1)]
sig = signature(types.voidptr, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return NativeValue(res, is_error=is_error)
@box(self.DynTypeType)
def boxer(typ, val, c):
# The boxer that returns an integer representation
res = c.builder.ptrtoint(val, cgutils.intp_t)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
with self.assertRaises(ValueError) as raises:
passthru(self.dyn_type)
self.assertIn(
"cannot be x > 0", str(raises.exception),
)
def test_boxer(self):
# Call jitcode inside the boxer
magic_token = 0xCAFE
magic_offset = 312
@intrinsic
def my_intrinsic(typingctx, val):
# An intrinsic that returns `val + magic_offset`
def impl(context, builder, sig, args):
[val] = args
return builder.add(val, val.type(magic_offset))
sig = signature(val, val)
return sig, impl
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
@box(self.DynTypeType)
def boxer(typ, val, c):
# Note: this doesn't do proper error handling
def bridge(x):
return my_intrinsic(x)
args = [c.context.get_constant(types.intp, magic_token)]
sig = signature(types.intp, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
r = passthru(self.dyn_type)
self.assertEqual(r, magic_token + magic_offset)
def test_boxer_raise(self):
# Call jitcode inside the boxer
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
@box(self.DynTypeType)
def boxer(typ, val, c):
def bridge(x):
if x > 0:
raise ValueError("cannot do x > 0")
return x
args = [c.context.get_constant(types.intp, 1)]
sig = signature(types.intp, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
# The error handling
retval = cgutils.alloca_once(c.builder, c.pyapi.pyobj, zfill=True)
with c.builder.if_then(c.builder.not_(is_error)):
obj = c.pyapi.long_from_ssize_t(res)
c.builder.store(obj, retval)
return c.builder.load(retval)
@njit
def passthru(x):
return x
with self.assertRaises(ValueError) as raises:
passthru(self.dyn_type)
self.assertIn(
"cannot do x > 0", str(raises.exception),
)
def with_objmode_cache_ov_example(x):
# This is the function stub for overloading inside
# TestCachingOverloadObjmode.test_caching_overload_objmode
pass
@skip_if_typeguard
class TestCachingOverloadObjmode(TestCase):
"""Test caching of the use of overload implementations that use
`with objmode`
"""
_numba_parallel_test_ = False
def setUp(self):
warnings.simplefilter("error", errors.NumbaWarning)
def tearDown(self):
warnings.resetwarnings()
def test_caching_overload_objmode(self):
cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", cache_dir):
def realwork(x):
# uses numpy code
arr = np.arange(x) / x
return np.linalg.norm(arr)
def python_code(x):
# create indirections
return realwork(x)
@overload(with_objmode_cache_ov_example)
def _ov_with_objmode_cache_ov_example(x):
def impl(x):
with objmode(y="float64"):
y = python_code(x)
return y
return impl
@njit(cache=True)
def testcase(x):
return with_objmode_cache_ov_example(x)
expect = realwork(123)
got = testcase(123)
self.assertEqual(got, expect)
testcase_cached = njit(cache=True)(testcase.py_func)
got = testcase_cached(123)
self.assertEqual(got, expect)
@classmethod
def check_objmode_cache_ndarray(cls):
def do_this(a, b):
return np.sum(a + b)
def do_something(a, b):
return np.sum(a + b)
@overload(do_something)
def overload_do_something(a, b):
def _do_something_impl(a, b):
with objmode(y='float64'):
y = do_this(a, b)
return y
return _do_something_impl
@njit(cache=True)
def test_caching():
a = np.arange(20)
b = np.arange(20)
return do_something(a, b)
got = test_caching()
expect = test_caching.py_func()
# Check result
if got != expect:
raise AssertionError("incorrect result")
return test_caching
@classmethod
def check_objmode_cache_ndarray_check_cache(cls):
disp = cls.check_objmode_cache_ndarray()
if len(disp.stats.cache_misses) != 0:
raise AssertionError('unexpected cache miss')
if len(disp.stats.cache_hits) <= 0:
raise AssertionError("unexpected missing cache hit")
def test_check_objmode_cache_ndarray(self):
# See issue #6130.
# Env is missing after cache load.
cache_dir = temp_directory(self.__class__.__name__)
with override_config("CACHE_DIR", cache_dir):
# Test in local process to populate the cache.
self.check_objmode_cache_ndarray()
# Run in new process to use the cache in a fresh process.
res = run_in_new_process_in_cache_dir(
self.check_objmode_cache_ndarray_check_cache, cache_dir
)
self.assertEqual(res['exitcode'], 0)
class TestMisc(TestCase):
def test_is_jitted(self):
def foo(x):
pass
self.assertFalse(is_jitted(foo))
self.assertTrue(is_jitted(njit(foo)))
self.assertFalse(is_jitted(vectorize(foo)))
self.assertFalse(is_jitted(vectorize(parallel=True)(foo)))
self.assertFalse(
is_jitted(guvectorize("void(float64[:])", "(m)")(foo))
)
class TestOverloadPreferLiteral(TestCase):
def test_overload(self):
def prefer_lit(x):
pass
def non_lit(x):
pass
def ov(x):
if isinstance(x, types.IntegerLiteral):
# With prefer_literal=False, this branch will not be reached.
if x.literal_value == 1:
def impl(x):
return 0xcafe
return impl
else:
raise errors.TypingError('literal value')
else:
def impl(x):
return x * 100
return impl
overload(prefer_lit, prefer_literal=True)(ov)
overload(non_lit)(ov)
@njit
def check_prefer_lit(x):
return prefer_lit(1), prefer_lit(2), prefer_lit(x)
a, b, c = check_prefer_lit(3)
self.assertEqual(a, 0xcafe)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
@njit
def check_non_lit(x):
return non_lit(1), non_lit(2), non_lit(x)
a, b, c = check_non_lit(3)
self.assertEqual(a, 100)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
def test_overload_method(self):
def ov(self, x):
if isinstance(x, types.IntegerLiteral):
# With prefer_literal=False, this branch will not be reached.
if x.literal_value == 1:
def impl(self, x):
return 0xcafe
return impl
else:
raise errors.TypingError('literal value')
else:
def impl(self, x):
return x * 100
return impl
overload_method(
MyDummyType, "method_prefer_literal",
prefer_literal=True,
)(ov)
overload_method(
MyDummyType, "method_non_literal",
prefer_literal=False,
)(ov)
@njit
def check_prefer_lit(dummy, x):
return (
dummy.method_prefer_literal(1),
dummy.method_prefer_literal(2),
dummy.method_prefer_literal(x),
)
a, b, c = check_prefer_lit(MyDummy(), 3)
self.assertEqual(a, 0xcafe)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
@njit
def check_non_lit(dummy, x):
return (
dummy.method_non_literal(1),
dummy.method_non_literal(2),
dummy.method_non_literal(x),
)
a, b, c = check_non_lit(MyDummy(), 3)
self.assertEqual(a, 100)
self.assertEqual(b, 200)
self.assertEqual(c, 300)
if __name__ == "__main__":
unittest.main()
|
bsd-2-clause
|
taimir/infogan-keras
|
learn/utils/visualization.py
|
1
|
7415
|
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import tensorflow as tf
import numpy as np
from scipy import interp
from sklearn.metrics import auc, roc_curve, silhouette_score, silhouette_samples
colors = ['#991012', '#c4884e', '#93bf8d', '#a3dbff']
sns.set_palette(colors)
def image_grid(input_tensor, grid_shape, image_shape):
"""
form_image_grid forms a grid of image tiles from input_tensor.
:param input_tensor - batch of images, shape (N, height, width, n_channels)
:param grid_shape - shape (in tiles) of the grid, e.g. (10, 10)
:param image_shape - shape of a single image, e.g. (28, 28, 1)
"""
# take the subset of images
input_tensor = input_tensor[:grid_shape[0] * grid_shape[1]]
# add black tiles if needed
required_pad = grid_shape[0] * grid_shape[1] - tf.shape(input_tensor)[0]
def add_pad():
padding = tf.zeros((required_pad,) + image_shape)
return tf.concat([input_tensor, padding], axis=0)
input_tensor = tf.cond(required_pad > 0, add_pad, lambda: input_tensor)
# height and width of the grid
height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1]
# form the grid tensor
input_tensor = tf.reshape(input_tensor, grid_shape + image_shape)
# flip height and width
input_tensor = tf.transpose(input_tensor, [0, 1, 3, 2, 4])
# form the rows
input_tensor = tf.reshape(input_tensor, [grid_shape[0], width, image_shape[0], image_shape[2]])
# flip width and height again
input_tensor = tf.transpose(input_tensor, [0, 2, 1, 3])
# form the columns
input_tensor = tf.reshape(input_tensor, [1, height, width, image_shape[2]])
return input_tensor
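# Illustrative usage sketch (not part of the original module): wire image_grid
# into a TF 1.x image summary. The summary tag "generated_samples" and the
# 28x28x1 shapes are assumptions for this example, not project APIs.
def _example_image_grid_summary(batch_tensor):
    """Return a summary op showing `batch_tensor` tiled as a 10x10 grid."""
    grid = image_grid(batch_tensor, grid_shape=(10, 10), image_shape=(28, 28, 1))
    # image_grid returns a single [1, height, width, channels] tensor,
    # which is the layout tf.summary.image expects.
    return tf.summary.image("generated_samples", grid, max_outputs=1)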
class ROCView(object):
"""
ROCView generates and plots the ROC curves of a model.
The view is created in a way that allows multiple ROC curves to be added to it before
it is saved.
Usage:
>>> tpr = [0.3, 1, 1]
>>> fpr = [0, 0.4, 1]
>>> view = ROCView()
>>> view.add_curve(fpr=fpr, tpr=tpr, label="ROC of model 1")
>>> # you can call view.add_curve() again if needed
>>> view.save_and_close("example_file.png")
"""
def __init__(self):
self.ax, self.fig = self._init_ROC()
def _init_ROC(self):
"""
initialise the plots (figure, axes)
:return:
"""
sns.set_style("whitegrid")
fig = plt.figure()
ax = plt.subplot(111)
ax.set_aspect(1)
plt.plot([0, 1], [0, 1], 'k--', lw=2)
plt.axes().set_aspect('equal')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False positive rate', size=10)
plt.ylabel('True positive rate', size=10)
plt.title('Receiver operating characteristic', size=15)
return ax, fig
def add_curve(self, fpr, tpr, label):
"""
computes and draws a ROC curve for the given TPR and FPR, adds a legend with the specified
label and the AUC score
:param fpr: array, false positive rate
:param tpr: array, true positive rate
:param label: text to be put into the legend entry for this curve
"""
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=2, label='{0} (AUC = {1:0.2f})'.format(label, roc_auc))
def save_and_close(self, file_path):
"""
saves the figure into a file.
:param file_path: path to the file for the figure of the ROC curve
:return:
"""
# Add the legend to the lower right corner of the axes
self.ax.legend(loc='lower right', fancybox=True, shadow=True, ncol=1, prop={'size': 9},
frameon=True)
self.fig.savefig(filename=file_path, bbox_inches='tight')
def micro_macro_roc(n_classes, y_expected, y_predicted):
"""
micro_macro_roc computes the TPR (true positive rate) and FPR (false positive rate)
for two different ROC curves based on multi-class classification results:
* "micro" : fpr, tpr are computed for the flattened predictions for all
classes (i.e. all predictions combined). Weakly represented classes
thus contribute less to the "micro" curve.
* "macro" : fpr, tpr are computed as an average of the ROC
curves for each of the classes. Thus every class is treated as
equally important in the "macro" curve.
:param n_classes: how many classes does the classifier predict for
:param y_expected: a numpy array of expected class labels
(1-hot encoded)
:param y_predicted: a numpy array of prediction scores
:return: {
"micro": (fpr, tpr),
"macro": (fpr, tpr)
}
"""
# Compute micro-average ROC curve
micro_fpr, micro_tpr, _ = roc_curve(y_expected.ravel(), y_predicted.ravel())
# Compute macro-average ROC curve
# First aggregate all false positive rates per class into one array
per_class_fpr = dict()
per_class_tpr = dict()
for i in range(n_classes):
per_class_fpr[i], per_class_tpr[i], _ = roc_curve(y_expected[:, i], y_predicted[:, i])
all_fpr = np.unique(np.concatenate([per_class_fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, per_class_fpr[i], per_class_tpr[i])
# Finally average it
mean_tpr /= float(n_classes)
macro_fpr = all_fpr
macro_tpr = mean_tpr
return {
"micro": (micro_fpr, micro_tpr),
"macro": (macro_fpr, macro_tpr)
}
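# Illustrative usage sketch (not part of the original module): draw the micro-
# and macro-averaged ROC curves with ROCView. `y_expected` is 1-hot encoded,
# `y_predicted` holds per-class scores; the output file name is an assumption.
def _example_plot_micro_macro_roc(y_expected, y_predicted, n_classes,
                                  file_path="micro_macro_roc.png"):
    curves = micro_macro_roc(n_classes, y_expected, y_predicted)
    view = ROCView()
    micro_fpr, micro_tpr = curves["micro"]
    macro_fpr, macro_tpr = curves["macro"]
    view.add_curve(fpr=micro_fpr, tpr=micro_tpr, label="micro-average")
    view.add_curve(fpr=macro_fpr, tpr=macro_tpr, label="macro-average")
    view.save_and_close(file_path)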
def cluster_silhouette_view(X, y, file_path, n_clusters):
# initialize the figure
sns.set_style("whitegrid")
fig = plt.figure()
ax = plt.subplot(111)
plt.xlim([-0.5, 1.0])
plt.ylim([0, X.shape[0] + (n_clusters + 1) * 10])
plt.xlabel('Silhouette score per sample', size=10)
plt.ylabel('Samples in clusters', size=10)
plt.title('Silhouette scores', size=15)
# compute the average silhouette score of the clustering
score_avg = silhouette_score(X, y)
print("The average silhouette score is :", score_avg)
# Compute the silhouette scores for each sample
score_per_sample = silhouette_samples(X, y)
y_lower = 10
for i in range(n_clusters):
# scores of the samples in i'th cluster, sorted
score_per_sample_i = score_per_sample[y == i]
score_per_sample_i.sort()
size_cluster_i = score_per_sample_i.shape[0]
# do the plotting of the diagram
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
plt.fill_betweenx(np.arange(y_lower, y_upper),
0, score_per_sample_i, alpha=0.7,
facecolor=color,
edgecolor=color,
label="cluster {}".format(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
# The vertical line for average silhouette score of all the values
ax.axvline(x=score_avg, color="red", linestyle="--")
ax.set_yticks([]) # Clear the yaxis labels / ticks
ax.legend(loc='lower right', fancybox=True, shadow=True, ncol=1, prop={'size': 9},
frameon=True)
fig.savefig(filename=file_path, bbox_inches='tight')
|
mit
|
danbob123/gplearn
|
gplearn/skutils/mocking.py
|
2
|
1809
|
from sklearn.base import BaseEstimator
from gplearn.skutils.testing import assert_true
class ArraySlicingWrapper(object):
def __init__(self, array):
self.array = array
def __getitem__(self, aslice):
return MockDataFrame(self.array[aslice])
class MockDataFrame(object):
# have shape and length but don't support indexing.
def __init__(self, array):
self.array = array
self.shape = array.shape
self.ndim = array.ndim
# ugly hack to make iloc work.
self.iloc = ArraySlicingWrapper(array)
def __len__(self):
return len(self.array)
def __array__(self):
# Pandas data frames also are array-like: we want to make sure that
# input validation in cross-validation does not try to call that
# method.
return self.array
class CheckingClassifier(BaseEstimator):
"""Dummy classifier to test pipelining and meta-estimators.
Checks some property of X and y in fit / predict.
This allows testing whether pipelines / cross-validation or meta-estimators
changed the input.
"""
def __init__(self, check_y=None,
check_X=None, foo_param=0):
self.check_y = check_y
self.check_X = check_X
self.foo_param = foo_param
def fit(self, X, y):
assert_true(len(X) == len(y))
if self.check_X is not None:
assert_true(self.check_X(X))
if self.check_y is not None:
assert_true(self.check_y(y))
return self
def predict(self, T):
if self.check_X is not None:
assert_true(self.check_X(T))
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
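# Illustrative usage sketch (not part of the original module): check that data
# reaching fit/predict still has the expected number of columns. The lambda is
# the property under test, not a fixed API of this class.
def _example_checking_classifier(X, y):
    n_features = X.shape[1]
    clf = CheckingClassifier(check_X=lambda X_in: X_in.shape[1] == n_features)
    clf.fit(X, y)          # assert_true fails here if the check does not hold
    return clf.predict(X)  # returns the number of rows in X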
|
bsd-3-clause
|
srndic/mimicus
|
setup.py
|
1
|
2350
|
#!/usr/bin/env python
'''
Copyright 2014 Nedim Srndic, University of Tuebingen
This file is part of Mimicus.
Mimicus is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mimicus is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mimicus. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
setup.py
Created on March 11, 2014.
'''
import multiprocessing # To fix a bug when running tests
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
def readme():
with open('README.rst') as f:
return f.read()
class MyInstall(install):
'''
A class for running custom post-install code.
'''
def run(self):
'''
Runs after installation.
'''
install.run(self)
from mimicus import config
class MyDevelop(develop):
'''
A class for running custom post-install code in develop-mode.
'''
def run(self):
'''
Runs after develop-mode installation.
'''
develop.run(self)
from mimicus import config
setup(name='mimicus',
version='1.0',
description='A library for adversarial classifier evasion',
url='https://github.com/srndic/mimicus',
download_url='https://github.com/srndic/mimicus/tarball/master',
author='Nedim Srndic, Pavel Laskov',
author_email='[email protected]',
license='GPLv3',
packages=find_packages(),
install_requires=['matplotlib >= 1.1.1rc',
'numpy >= 1.6.1',
'scikit_learn >= 0.13.1',
'scipy >= 0.9.0'],
zip_safe=False,
test_suite='nose.collector',
tests_require=['nose'],
include_package_data=True,
cmdclass={'install': MyInstall,
'develop': MyDevelop})
|
gpl-3.0
|
bhillmann/koko
|
koko/classifiers/linear_perceptron.py
|
1
|
2542
|
"""
This is an implementation of the mini-batch gradient descent algorithm for a linear perceptron model
The class model is based off of sklearn:
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html
"""
import numpy as np
from koko.optimization import minibatch_gradient_descent
from koko.utils import add_intercept
class LinearPerceptron:
def __init__(self, epochs=5, eta=.01, optimization=None):
"""
A Linear Perceptron implementation using Numpy vector optimizations
:param epochs: the number of passes through the data
:param eta: the learning rate
:param optimization: the optimization method given the data matrix X, and the target vector y
"""
self.n_classes = None
self.weights = None
self.epochs = epochs
self.eta = eta
if not optimization:
    self.optimization = lambda X, y: minibatch_gradient_descent(X, y, self.eta, self._compute_gradient,
                                                                self.epochs)
else:
    self.optimization = optimization
def fit(self, X, y):
"""
Fit the weights of the perceptron
:param X: the data to fit
:param y: the target vector
"""
X = add_intercept(X)
self.n_classes = np.unique(y).shape[0]
if self.n_classes != 2:
raise Exception("LinearPerceptron only supports binary classification")
self.classes = dict(zip((-1, 1), np.unique(y)))
target = np.array([1 if self.classes[1] == _ else -1 for _ in y])
self.weights = self.optimization(X, target)
def _compute_gradient(self, X, y, weights):
"""
Computes the gradient of the perceptron model
:param X: data matrix
:param y: target vector
:param weights: current weights
:return: gradient vector
"""
y = np.atleast_1d(y)
X = np.atleast_2d(X)
errors = y-np.inner(weights, X)
return np.inner(-errors, X.T)
def predict(self, X):
"""
Use the fitted model to predict new data
:param X: data to predict
:return: predicted class vector
"""
X = add_intercept(X)
temp = self._activation_function(np.inner(self.weights, X))
return np.array([self.classes[_] for _ in temp.astype(int)])
def _activation_function(self, x):
"""
The activation function for a perceptron
:param x: prediction vector
:return: binary activation vector
"""
return np.where(x >= 0., 1, -1)
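# Illustrative usage sketch (not part of the original module): fit the
# perceptron on a small linearly separable problem. The epoch and learning-rate
# values are arbitrary choices for this example.
def _example_linear_perceptron():
    X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    y = np.array([0, 0, 1, 1])  # separable on the first feature
    clf = LinearPerceptron(epochs=10, eta=0.1)
    clf.fit(X, y)
    return clf.predict(X)  # expected to reproduce y on this toy data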
|
mit
|
glouppe/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
32
|
17897
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
|
bsd-3-clause
|
sinhrks/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
maojrs/Interface_Euler_AMR
|
code_cartesian/setplot.py
|
1
|
12049
|
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
# Note: To change plotted time scale, edit frametools.py in visclaw
import os
import numpy as np
from matplotlib import rc
rc('text', usetex=True)
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of clawpack.visclaw.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps
plotdata.clearfigures() # clear any old figures,axes,items data
# Plot outline of interface without mapping
def aa(current_data):
from pylab import linspace,plot,annotate,text
from pylab import title, xlabel, ylabel, xticks, yticks, colorbar
# Plot interface
rout = 0.015
rinn = 0.010
x = [-rout, -rout, rout, rout]
y = [0.0, rout, rout, 0.0]
plot(x,y,'k',linewidth=4.0)
# Change title
t = current_data.t
tmicros = 1000000*t
title(r"Pressure at time t = %10.2f $\mu s$" % tmicros, fontsize=16)
# Change axes
xlabel(r"$cm$", fontsize='16')
ylabel(r"$cm$", fontsize='16')
# Change ticks on axes (WATCHOUT IF DOMAIN OF SIMULATION IS CHANGED)
xxticks = np.arange(-0.05, 0.05, 0.00999)
labelsx = range(xxticks.size)
labelsx[:] = [x - 5 for x in labelsx]
xticks(xxticks, labelsx)
yyticks = np.arange(0.0, 0.03, 0.00999)
labelsy = range(yyticks.size)
labelsy[:] = [y for y in labelsy]
yticks(yyticks, labelsy)
# Plot outline of interface
def aa1DPSIcm(current_data):
from pylab import linspace,plot,annotate,text,xlabel,ylabel
#gcs = 2.0/200.0
x = [-1.5,-1.5,1.5,1.5]
y = [-100,100,100,-100]
#y[:] = [xx - gcs for xx in y]
plot(x,y,'k',linewidth=2.0)
xlabel('cm',fontsize='16')
ylabel('psi',fontsize='16')
xcav = [-3.0,3.0]
ycav = [-14.334351113,-14.334351113] #Water vapour pressure for cavitation at room temp in 1atm=0 ref system
plot(xcav,ycav,'b--')
#plot(-8.0, 180000, 'vk', markersize=10)
#plot(-2.0, 180000, 'vk', markersize=10)
#plot(0.0, 180000, 'vk', markersize=10)
#plot(2.0, 180000, 'vk', markersize=10)
text(-0.75,27,'Water',fontweight='bold',fontsize=20)
#text(-0.8,285000,'PS',fontweight='bold',fontsize=20)
text(-2.9,27,'Air',fontweight='bold',fontsize=20)
text(1.6,27,'Air',fontweight='bold',fontsize=20)
text(-1.45,-13,'Vapor pressure',fontsize=15,color='blue')
# Function to calculate pressure when using Tammann EOS
def Pressure(current_data):
q = current_data.q # solution when this function called
aux = current_data.aux
gamma = aux[0,:,:]
gamma1 = aux[0,:,:] - 1.0
pinf = aux[1,:,:]
omega = aux[2,:,:]
rho = q[0,:,:] # density
momx = q[1,:,:] # momentum x
momy = q[2,:,:] # momentum y
ene = q[3,:,:] # energy
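# Tammann (stiffened gas) EOS: p = (gamma - 1)*(E - 0.5*(momx^2 + momy^2)/rho) - gamma*pinf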
P = gamma1*(ene - 0.5*(momx*momx + momy*momy)/rho)
P = P - gamma*pinf
return P
# Figure for Density
# -------------------
plotfigure = plotdata.new_plotfigure(name='Density', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [-0.03,0.03] #'auto'
plotaxes.ylimits = [-0.05,0.05]#'auto'
plotaxes.title = 'Density'
#plotaxes.scaled = True # so aspect ratio is 1
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = 0
plotitem.pcolor_cmap = colormaps.yellow_red_blue
#plotitem.pcolor_cmin = 0.8
#plotitem.pcolor_cmax = 3.0
plotitem.add_colorbar = True
plotitem.pcolor_cmin = 1.0
plotitem.pcolor_cmax = 2.0
plotitem.show = True # show on plot?
plotitem.MappedGrid = False
plotaxes.afteraxes = aa
# Figure for momentum x
# -------------------
plotfigure = plotdata.new_plotfigure(name='Momentum x', figno=1)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [-0.03,0.03] #'auto'
plotaxes.ylimits = [-0.05,0.05] #'auto'
plotaxes.title = 'Momentum x'
#plotaxes.scaled = True # so aspect ratio is 1
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = 1
plotitem.pcolor_cmap = colormaps.yellow_red_blue
plotitem.add_colorbar = True
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 160.0
plotitem.show = True # show on plot?
plotitem.MappedGrid = False
plotaxes.afteraxes = aa
# Figure for momentum y
# -------------------
plotfigure = plotdata.new_plotfigure(name='Momentum y', figno=2)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [-0.03,0.03]#'auto'
plotaxes.ylimits = [-0.05,0.05]#'auto'
plotaxes.title = 'Momentum y'
#plotaxes.scaled = True # so aspect ratio is 1
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = 2
plotitem.pcolor_cmap = colormaps.yellow_red_blue
plotitem.add_colorbar = True
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 160.0
plotitem.show = True # show on plot?
plotitem.MappedGrid = False
plotaxes.afteraxes = aa
# Figure for Energy
# -------------------
plotfigure = plotdata.new_plotfigure(name='Energy', figno=3)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [-0.03,0.03]#'auto'
plotaxes.ylimits = [-0.05,0.05]#'auto'
plotaxes.title = 'Energy'
#plotaxes.scaled = True # so aspect ratio is 1
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = 3
plotitem.pcolor_cmap = colormaps.yellow_red_blue
plotitem.add_colorbar = True
plotitem.show = True # show on plot?
plotitem.pcolor_cmin = 200000
plotitem.pcolor_cmax = 400000
plotitem.MappedGrid = False
plotaxes.afteraxes = aa
# Figure for Pressure
# -------------------
plotfigure = plotdata.new_plotfigure(name='Pressure', figno=4)
plotfigure.kwargs = {'figsize':[8,3.7], 'tight_layout':True}
#plotfigure.kwargs = {'figsize':[8,8], 'tight_layout':True} # For colorbar output
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('Pressure')
plotaxes.xlimits = [-0.04,0.04]
plotaxes.ylimits = [0.001,0.035]
plotaxes.title = 'Pressure'
plotaxes.scaled = True # so aspect ratio is 1
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.pcolor_cmin = 90000
plotitem.pcolor_cmax = 230000
#plotitem.pcolor_cmap = colormaps.white_blue
#white_green_cmap = colormaps.make_colormap({0.:'w', 0.35: '#54ED96', 0.7: '#31BCBC', 1.:'#005F8B'}) #5CDAE3
#white_green_cmap = colormaps.make_colormap({0.:'w', 0.35: '#60E9D0', 0.7: '#3174B7', 1.:'#0B357F'}) #5CDAE3
white_green_cmap = colormaps.make_colormap({0.:'w', 0.35: '#AAFFEF', 0.7: '#62B4E7', 1.:'#4584F0'})
plotitem.pcolor_cmap = white_green_cmap
#plotitem.add_colorbar = True
plotitem.plot_var = Pressure # defined above
#plotitem.plotstyle = '-o'
#plotitem.color = 'r'
# For AMR patches and cell edges (# REMEMBER TO CHANGE amr_contour_show TOO)
plotitem.amr_patchedges_show = [0,0,0,1] #[0,0,0,0,1] #[0,0,0,0,0,1]
plotitem.amr_celledges_show = [1,1,1,0] #[1,1,0,0,0] #[1,1,1,1,0,0]
plotitem.MappedGrid = True
plotitem.MappedGrid = False
# Add contours as well
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.plot_var = Pressure
plotitem.contour_levels = np.linspace(90000,230000,30)
#plotitem.contour_nlevels = 10
#plotitem.contour_min = 91000.0
#plotitem.contour_max = 290000.0
#plotitem.amr_patchedges_show = [0,0,1]
#plotitem.amr_celledges_show = [1,1,0]
plotitem.MappedGrid = False
plotitem.show = True
plotitem.amr_contour_colors = ['b','#3C3C3C','k']
plotitem.amr_contour_show = [0, 0, 0, 1]
plotaxes.afteraxes = aa
# Figure for Pressure (Schlieren)
plotfigure = plotdata.new_plotfigure(name='Pressure schlieren', figno=9)
plotfigure.kwargs = {'figsize':[8,3.7], 'tight_layout':True}
#plotfigure.kwargs = {'figsize':[8,8], 'tight_layout':True} # For colorbar output
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('Pressure')
plotaxes.xlimits = [-0.04,0.04]
plotaxes.ylimits = [0.001,0.035]
    plotaxes.title = 'Pressure schlieren'
plotaxes.scaled = True # so aspect ratio is 1
plotitem = plotaxes.new_plotitem(plot_type='2d_schlieren')
plotitem.schlieren_cmin = 500 #2000 #500 #20
plotitem.schlieren_cmax = 30000 #3500 #25000 #30000
plotitem.add_colorbar = True
plotitem.plot_var = Pressure # defined above
# For AMR
plotitem.amr_patchedges_show = [0,0,0,0,1]
plotitem.amr_celledges_show = [0,0,0,0,0]
plotitem.MappedGrid = True
plotitem.MappedGrid = False
plotaxes.afteraxes = aa
# Figure for Pressure slice
# -------------------
plotfigure = plotdata.new_plotfigure(name='Pressure slice', figno=6)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
# Axes for m vs Pa or cm vs PSI
#plotaxes.xlimits = [-0.03,0.03] #[-3,3] #[-8.5,16] #'auto' -16
#plotaxes.ylimits = [0.00000,300000]
plotaxes.xlimits = [-3.0,3.0]
plotaxes.ylimits = [-20,30]
plotaxes.title = 'Pressure slice'
plotitem = plotaxes.new_plotitem(plot_type='1d_from_2d_data')
def xsec(current_data):
        # Return x (in cm) and gauge pressure (in psi) along the slice y = 0
from pylab import find,ravel
x = current_data.x
y = current_data.y
dy = current_data.dy
q = current_data.q
aux = current_data.aux
ij = find((y <= dy/2.) & (y > -dy/2.))
x_slice = ravel(x)[ij]
gamma_slice = ravel(aux[0,:,:])[ij]
pinf_slice = ravel(aux[1,:,:])[ij]
rho_slice = ravel(q[0,:,:])[ij]
momx_slice = ravel(q[1,:,:])[ij]
momy_slice = ravel(q[2,:,:])[ij]
ene_slice = ravel(q[3,:,:])[ij]
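        # Pressure from the stiffened-gas (Tammann) EOS: p = (gamma - 1)*(E - 0.5*rho*|u|^2) - gamma*pinf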
P_slice = (gamma_slice - 1.0)*(ene_slice - 0.5*(momx_slice**2 + momy_slice**2)/rho_slice)
P_slice = P_slice - gamma_slice*pinf_slice
        # Convert Pa to gauge psi (1 Pa = 0.000145038 psi; subtract 1 atm = 14.696 psi) and m to cm
P_slice = P_slice*0.000145038 - 14.6959488
x_slice = 100*x_slice
return x_slice, P_slice
plotitem.map_2d_to_1d = xsec
plotitem.plotstyle = '-kx'
plotitem.kwargs = {'markersize':3}
plotaxes.afteraxes = aa1DPSIcm
# Parameters used only when creating html and/or latex hardcopy
# e.g., via clawpack.visclaw.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos =[34, 70, 78, 86, 110, 143] # list of frames to print 'all' for all frames
plotdata.print_fignos = [4] #'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
|
bsd-2-clause
|
nschloe/quadpy
|
tests/test_c1.py
|
1
|
7770
|
import math
import numpy as np
import orthopy
import pytest
from mpmath import mp
import quadpy
@pytest.mark.parametrize(
"scheme",
[quadpy.c1.midpoint()]
+ [quadpy.c1.trapezoidal()]
+ [quadpy.c1.clenshaw_curtis(k) for k in range(2, 10)]
+ [quadpy.c1.gauss_legendre(k) for k in range(1, 6)]
+ [quadpy.c1.gauss_lobatto(k) for k in range(2, 7)]
+ [quadpy.c1.gauss_kronrod(k) for k in range(2, 7)]
+ [quadpy.c1.gauss_patterson(k) for k in range(9)]
+ [quadpy.c1.gauss_radau(k) for k in range(2, 10)]
+ [quadpy.c1.fejer_1(k) for k in range(1, 10)]
+ [quadpy.c1.fejer_2(k) for k in range(1, 10)]
+ [quadpy.c1.newton_cotes_closed(k) for k in range(1, 5)]
+ [quadpy.c1.newton_cotes_open(k) for k in range(1, 5)],
)
def test_scheme(scheme):
assert scheme.points.dtype in [np.float64, np.int64], scheme.name
assert scheme.weights.dtype in [np.float64, np.int64], scheme.name
# https://github.com/nschloe/quadpy/issues/227
assert scheme.weights.ndim == 1
# test scheme.__str__
print(scheme)
degree = 0
while True:
# Set bounds such that the values are between 0.5 and 1.5.
exact_val = 1.0 / (degree + 1)
interval = np.array(
[
[0.5 ** (1.0 / (degree + 1)), 0.0, 0.0],
[1.5 ** (1.0 / (degree + 1)), 0.0, 0.0],
]
)
interval = np.array([[0.3], [0.5]])
val = scheme.integrate(lambda x: x[0] ** degree, interval)
# same test with line embedded in R^2
interval = np.array(
[[0.5 ** (1.0 / (degree + 1)), 0.0], [1.5 ** (1.0 / (degree + 1)), 0.0]]
)
val = scheme.integrate(lambda x: x[0] ** degree, interval)
if abs(exact_val - val) > 1.0e-12 * abs(exact_val):
break
if degree >= scheme.degree:
break
degree += 1
assert degree == scheme.degree
@pytest.mark.parametrize(
"scheme", [quadpy.c1.chebyshev_gauss_1(k) for k in range(1, 10)]
)
def test_cheb1_scheme(scheme):
evaluator = orthopy.c1.chebyshev1.Eval(scheme.points, "normal")
k = 0
while True:
approximate = scheme.integrate(lambda x: next(evaluator), [-1, 1])
exact = math.sqrt(math.pi) if k == 0 else 0.0
err = np.abs(approximate - exact)
if np.any(err > 1.0e-14):
break
k += 1
max_err = np.max(err)
assert k - 1 >= scheme.degree, (
f"{scheme.name} -- observed: {k - 1}, expected: {scheme.degree} "
f"(max err: {max_err:.3e})"
)
@pytest.mark.parametrize(
"scheme", [quadpy.c1.chebyshev_gauss_2(k) for k in range(1, 10)]
)
def test_cheb2_scheme(scheme):
evaluator = orthopy.c1.chebyshev2.Eval(scheme.points, "normal")
k = 0
while True:
approximate = scheme.integrate(lambda x: next(evaluator), [-1, 1])
exact = math.sqrt(math.pi / 2) if k == 0 else 0.0
err = np.abs(approximate - exact)
if np.any(err > 1.0e-14):
break
k += 1
max_err = np.max(err)
assert k - 1 >= scheme.degree, (
f"{scheme.name} -- observed: {k - 1}, expected: {scheme.degree} "
f"(max err: {max_err:.3e})"
)
@pytest.mark.parametrize("scheme", [quadpy.c1.newton_cotes_closed(5)])
def test_show(scheme):
scheme.show()
def test_integrate_split():
x = np.linspace(0.15, 0.702, 101)
intervals = np.array([x[:-1], x[1:]])
scheme = quadpy.c1.trapezoidal()
val = scheme.integrate(
lambda r: 0.5108
/ r ** 2
/ np.sqrt(2 * 1.158 + 2 / r - 0.5108 ** 2 / (2 * r ** 2)),
intervals,
)
val = np.sum(val)
reference = 0.961715
assert abs(val - reference) < 1.0e-3 * reference
def test_legendre_mpmath():
mp.dps = 50
scheme = quadpy.c1.gauss_legendre(4, mode="mpmath")
tol = 1.0e-50
x1 = mp.sqrt(mp.mpf(3) / 7 - mp.mpf(2) / 7 * mp.sqrt(mp.mpf(6) / 5))
x2 = mp.sqrt(mp.mpf(3) / 7 + mp.mpf(2) / 7 * mp.sqrt(mp.mpf(6) / 5))
assert (abs(scheme.points_symbolic - [-x2, -x1, +x1, +x2]) < tol).all()
w1 = (18 + mp.sqrt(30)) / 36
w2 = (18 - mp.sqrt(30)) / 36
assert (abs(scheme.weights_symbolic - [w2, w1, w1, w2]) < tol).all()
def test_chebyshev1_sympy():
scheme = quadpy.c1.chebyshev_gauss_1(4, mode="sympy")
scheme_numpy = quadpy.c1.chebyshev_gauss_1(4, mode="numpy")
flt = np.vectorize(float)
tol = 1.0e-15
assert (abs(flt(scheme.points) - scheme_numpy.points) < tol).all()
assert (abs(flt(scheme.weights) - scheme_numpy.weights) < tol).all()
def test_chebyshev2_sympy():
scheme = quadpy.c1.chebyshev_gauss_2(4, mode="sympy")
scheme_numpy = quadpy.c1.chebyshev_gauss_2(4, mode="numpy")
flt = np.vectorize(float)
tol = 1.0e-15
assert (abs(flt(scheme.points) - scheme_numpy.points) < tol).all()
assert (abs(flt(scheme.weights) - scheme_numpy.weights) < tol).all()
def test_chebyshev1_mpmath():
mp.dps = 50
scheme = quadpy.c1.chebyshev_gauss_1(4, mode="mpmath")
tol = 1.0e-50
x1 = mp.cos(3 * mp.pi / 8)
x2 = mp.cos(1 * mp.pi / 8)
assert (abs(scheme.points_symbolic - [+x2, +x1, -x1, -x2]) < tol).all()
w = mp.pi / 4
tol = 1.0e-49
assert (abs(scheme.weights_symbolic - [w, w, w, w]) < tol).all()
def test_chebyshev2_mpmath():
mp.dps = 51
scheme = quadpy.c1.chebyshev_gauss_2(4, mode="mpmath")
tol = 1.0e-50
x1 = mp.cos(2 * mp.pi / 5)
x2 = mp.cos(1 * mp.pi / 5)
assert (abs(scheme.points_symbolic - [+x2, +x1, -x1, -x2]) < tol).all()
w1 = mp.pi / 5 * mp.sin(2 * mp.pi / 5) ** 2
w2 = mp.pi / 5 * mp.sin(1 * mp.pi / 5) ** 2
assert (abs(scheme.weights_symbolic - [w2, w1, w1, w2]) < tol).all()
def test_jacobi_mpmath():
mp.dps = 51
scheme = quadpy.c1.gauss_jacobi(4, 1, 1, mode="mpmath")
tol = 1.0e-50
x1 = mp.sqrt((7 - 2 * mp.sqrt(7)) / 21)
x2 = mp.sqrt((7 + 2 * mp.sqrt(7)) / 21)
assert (abs(scheme.points_symbolic - [-x2, -x1, +x1, +x2]) < tol).all()
w1 = (5 + mp.sqrt(7)) / 15
w2 = (5 - mp.sqrt(7)) / 15
assert (abs(scheme.weights_symbolic - [w2, w1, w1, w2]) < tol).all()
def test_multidim():
scheme = quadpy.c1.gauss_legendre(5)
# simple scalar integration
val = scheme.integrate(np.sin, [0.0, 1.0])
assert val.shape == ()
# scalar integration on 3 subdomains
val = scheme.integrate(np.sin, [[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]])
assert val.shape == (3,)
# scalar integration in 3D
val = scheme.integrate(
lambda x: x[0] + np.sin(x[1]) + np.cos(x[2]),
[[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]],
)
assert val.shape == ()
# vector-valued integration on 3 subdomains
val = scheme.integrate(
lambda x: [np.sin(x), np.cos(x)], [[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]]
)
assert val.shape == (2, 3)
# vector-valued integration in 3D
val = scheme.integrate(
lambda x: [x[0] + np.sin(x[1]), np.cos(x[0]) * x[2]],
[[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]],
)
assert val.shape == (2,)
# another vector-valued integration in 3D
# This is one case where the integration routine may not properly recognize the
# dimensionality of the domain. Use the `dim` parameter.
val = scheme.integrate(
lambda x: [
x[0] + np.sin(x[1]),
np.cos(x[0]) * x[2],
np.sin(x[0]) + x[1] + x[2],
],
[[0.0, 1.0, 2.0], [1.0, 2.0, 3.0]],
domain_shape=(3,),
)
assert val.shape == (3,)
if __name__ == "__main__":
test_multidim()
# scheme_ = quadpy.c1.Fejer2(20)
# # scheme_ = quadpy.c1.Midpoint()
# test_scheme(scheme_)
# test_show(scheme_)
# # import matplotlib.pyplot as plt
# # plt.savefig('demo.png', transparent=True)
|
mit
|
jereze/scikit-learn
|
examples/ensemble/plot_feature_transformation.py
|
67
|
4285
|
"""
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
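# --- Editor's illustrative sketch (not part of the original example) ---
# A minimal, self-contained illustration of the leaf-index encoding described
# in the docstring above: each sample is mapped to the index of the leaf it
# reaches in every tree, and those indices are then one-hot encoded.  The toy
# data, model sizes and random seeds below are arbitrary choices.
from sklearn.datasets import make_classification as _make_toy_data
from sklearn.ensemble import RandomForestClassifier as _ToyForest
from sklearn.preprocessing import OneHotEncoder as _ToyOneHot

_X_toy, _y_toy = _make_toy_data(n_samples=20, random_state=0)
_toy_forest = _ToyForest(n_estimators=3, max_depth=2, random_state=0)
_toy_forest.fit(_X_toy, _y_toy)
_toy_leaves = _toy_forest.apply(_X_toy)                   # shape (20, 3): one leaf id per tree
_toy_embedding = _ToyOneHot().fit_transform(_toy_leaves)  # sparse one-hot leaf embedding
# --- end of editor's sketch ---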
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator)
rt_lm = LogisticRegression()
rt.fit(X_train, y_train)
rt_lm.fit(rt.transform(X_train_lr), y_train_lr)
y_pred_rt = rt_lm.predict_proba(rt.transform(X_test))[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
|
bsd-3-clause
|
btgorman/RISE-power-water-ss-1phase
|
analysis_power_n2.py
|
1
|
29295
|
# Copyright 2017 Brandon T. Gorman
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BUILT USING PYTHON 3.6.0
import ctypes as ct
import pandas as pd
import numpy as np
import random, csv, sys, os
import math
from statistics import median
import classes_water as ENC
import classes_power as ODC
import classes_interconnection as ICC
import grb_solvers
from comtypes import automation
import win32com.client
# from win32com.client import makepy
# sys.argv = ['makepy', 'OpenDSSEngine.DSS']
# makepy.main()
def main(dss_debug, write_cols, plf):
os_username = os.getlogin()
# --------------
# READ CSV FILES
# --------------
csv_curve = pd.read_csv('./data_water/network-water/2000curve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_junction = pd.read_csv('./data_water/network-water/2100junction.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_reservoir = pd.read_csv('./data_water/network-water/2101reservoir.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_tank = pd.read_csv('./data_water/network-water/2102tank.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_pipe = pd.read_csv('./data_water/network-water/2200pipe.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_pump = pd.read_csv('./data_water/network-water/2201pump.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_valve = pd.read_csv('./data_water/network-water/2202valve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_xycurve = pd.read_csv('./data_power/network-power/1000xycurve.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_regcontrol = pd.read_csv('./data_power/network-power/1100regcontrol.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_wiredata = pd.read_csv('./data_power/network-power/1200wiredata.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_linecode = pd.read_csv('./data_power/network-power/1201linecode.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_bus = pd.read_csv('./data_power/network-power/1300bus.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_vsource = pd.read_csv('./data_power/network-power/1301vsource.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_generator = pd.read_csv('./data_power/network-power/1302generator.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_load = pd.read_csv('./data_power/network-power/1303load.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_solarpv = pd.read_csv('./data_power/network-power/1304solarpv.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_windturbine = pd.read_csv('./data_power/network-power/1305windturbine.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_directconnection = pd.read_csv('./data_power/network-power/1400directconnection.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_cable = pd.read_csv('./data_power/network-power/1401cable.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_overheadline = pd.read_csv('./data_power/network-power/1402overheadline.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_twowindingtransformer = pd.read_csv('./data_power/network-power/1403twowindingtransformer.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_capacitor = pd.read_csv('./data_power/network-power/1404capacitor.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_reactor = pd.read_csv('./data_power/network-power/1405reactor.csv', sep=',', header=1, index_col=None, dtype=np.float64)
# csv_allcolumns= pd.read_csv('./data_power/network-power/allcolumns.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_pumpload = pd.read_csv('./data_interconnection/network-interconnection/9000pump-load.csv', sep=',', header=1, index_col=None, dtype=np.float64)
csv_tankgenerator = pd.read_csv('./data_interconnection/network-interconnection/9001generator-junction.csv', sep=',', header=1, index_col=None, dtype=np.float64)
# -----------------
# CREATE COMPONENTS
# -----------------
object_curve = ENC.Curve(csv_curve)
object_junction = ENC.Junction(csv_junction)
object_reservoir = ENC.Reservoir(csv_reservoir)
object_tank = ENC.Tank(csv_tank)
object_pipe = ENC.Pipe(csv_pipe)
object_pump = ENC.Pump(csv_pump)
object_valve = ENC.Valve(csv_valve)
object_xycurve = ODC.XYCurve(csv_xycurve)
object_regcontrol = ODC.RegControl(csv_regcontrol)
object_wiredata = ODC.WireData(csv_wiredata)
object_linecode = ODC.LineCode(csv_linecode)
object_bus = ODC.Bus(csv_bus)
object_vsource = ODC.VSource(csv_vsource)
object_generator = ODC.Generator(csv_generator)
object_load = ODC.Load(csv_load)
object_solarpv = ODC.SolarPV(csv_solarpv)
object_windturbine = ODC.WindTurbine(csv_windturbine, object_xycurve)
object_directconnection = ODC.DirectConnection(csv_directconnection)
object_cable = ODC.Cable(csv_cable)
object_overheadline = ODC.OverheadLine(csv_overheadline)
object_twowindingtransformer = ODC.TwoWindingTransformer(csv_twowindingtransformer)
object_capacitor = ODC.Capacitor(csv_capacitor)
object_reactor = ODC.Reactor(csv_reactor)
object_pumpload = ICC.PumpLoad(csv_pumpload)
object_tankgenerator = ICC.GeneratorJunction(csv_tankgenerator)
# -----------------------
# ADD COMPONENTS TO LISTS
# -----------------------
w_object_list = [object_junction, object_reservoir, object_tank, # Water NODES
object_pipe, object_pump, object_valve, # Water LINKS
object_curve] # Water SYSTEM OPS
object_list = [object_vsource, object_bus, object_generator, object_load, object_solarpv, object_windturbine, #NODES
object_xycurve, object_wiredata, object_linecode, #OTHERS
object_directconnection, object_cable, object_overheadline, object_twowindingtransformer, object_capacitor, object_reactor, # CONNECTIONS
object_regcontrol] # CONTROLS
interconn_dict = {'pumpload': object_pumpload, 'tankgenerator': object_tankgenerator,
'pump': object_pump, 'load': object_load, 'tank': object_tank,
'generator': object_generator, 'junction': object_junction}
# ---------
# RUN EPANET and OPENDSS
# ---------
def run_EPANET():
filedir = 'C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/data_water/en-inputs/en-input.inp'
with open(filedir, 'w', newline='\n') as csvfile:
writer = csv.writer(csvfile, delimiter=' ')
templist = ['[TITLE]']
writer.writerow(templist)
writer.writerow('')
for water_object in w_object_list:
water_object.createAllEN(writer, interconn_dict)
templist = ['[ENERGY]']
writer.writerow(templist)
templist = ['Global', 'Efficiency', 75]
writer.writerow(templist)
templist = ['Global', 'Price', 0]
writer.writerow(templist)
templist = ['Demand', 'Charge', 0]
writer.writerow(templist)
writer.writerow('')
templist = ['[REACTIONS]']
writer.writerow(templist)
templist = ['Order', 'Bulk', 1]
writer.writerow(templist)
templist = ['Order', 'Tank', 1]
writer.writerow(templist)
templist = ['Order', 'Wall', 1]
writer.writerow(templist)
templist = ['Global', 'Bulk', 0]
writer.writerow(templist)
templist = ['Global', 'Wall', 0]
writer.writerow(templist)
templist = ['Limiting', 'Potential', 0]
writer.writerow(templist)
templist = ['Roughness', 'Correlation', 0]
writer.writerow(templist)
writer.writerow('')
templist = ['[TIMES]']
writer.writerow(templist)
templist = ['Duration', '0:00:10']
writer.writerow(templist)
templist = ['Hydraulic', 'Timestep', '0:00:10']
writer.writerow(templist)
templist = ['Quality', 'Timestep', '0:05']
writer.writerow(templist)
templist = ['Pattern', 'Timestep', '1:00']
writer.writerow(templist)
templist = ['Pattern', 'Start', '0:00']
writer.writerow(templist)
templist = ['Report', 'Timestep', '1:00']
writer.writerow(templist)
templist = ['Report', 'Start', '0:00']
writer.writerow(templist)
templist = ['Start', 'ClockTime', 12, 'am']
writer.writerow(templist)
templist = ['Statistic', 'NONE']
writer.writerow(templist)
writer.writerow('')
templist = ['[REPORT]']
writer.writerow(templist)
templist = ['Status', 'No']
writer.writerow(templist)
templist = ['Summary', 'No']
writer.writerow(templist)
templist = ['Page', 0]
writer.writerow(templist)
writer.writerow('')
templist = ['[OPTIONS]']
writer.writerow(templist)
templist = ['Units', 'GPM'] #GPM is US Customary units
writer.writerow(templist)
templist = ['Headloss', 'H-W']
writer.writerow(templist)
templist = ['Specific', 'Gravity', 1]
writer.writerow(templist)
templist = ['Viscosity', 1]
writer.writerow(templist)
templist = ['Trials', 40]
writer.writerow(templist)
templist = ['Accuracy', 0.001]
writer.writerow(templist)
templist = ['CHECKFREQ', 2]
writer.writerow(templist)
templist = ['MAXCHECK', 10]
writer.writerow(templist)
templist = ['DAMPLIMIT', 0]
writer.writerow(templist)
templist = ['Unbalanced', 'Continue', 10]
writer.writerow(templist)
templist = ['Pattern', 1]
writer.writerow(templist)
templist = ['Demand', 'Multiplier', 1.0]
writer.writerow(templist)
templist = ['Emitter', 'Exponent', 0.5]
writer.writerow(templist)
templist = ['Quality', 'None', 'mg/L']
writer.writerow(templist)
templist = ['Diffusivity', 1]
writer.writerow(templist)
templist = ['Tolerance', 0.01]
writer.writerow(templist)
writer.writerow('')
templist=['[END]']
writer.writerow(templist)
epalib = ct.cdll.LoadLibrary('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/data_water/epanet2mingw64.dll')
# Byte objects
en_input_file = ct.c_char_p(filedir.encode('utf-8'))
en_report_file = ct.c_char_p(str('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/data_water/en-outputs/out.rpt').encode('utf-8'))
en_byte_file = ct.c_char_p(''.encode('utf-8'))
# Send strings as char* to the epalib function
errorcode = epalib.ENopen(en_input_file, en_report_file, en_byte_file)
if errorcode != 0:
print(1, 'ERRORCODE is', errorcode)
errorcode = epalib.ENopenH()
if errorcode != 0:
print(2, 'ERRORCODE is', errorcode)
init_flag = ct.c_int(0)
errorcode = epalib.ENinitH(init_flag)
if errorcode != 0:
print(3, 'ERRORCODE is', errorcode)
time = ct.pointer(ct.c_long(1))
timestep = ct.pointer(ct.c_long(1))
while True:
errorcode = epalib.ENrunH(time)
if errorcode != 0:
print(4, 'ERRORCODE is', errorcode)
for water_object in w_object_list:
water_object.readAllENoutputs(epalib)
errorcode = epalib.ENnextH(timestep)
if errorcode != 0:
print(5, 'ERRORCODE is', errorcode)
if timestep.contents.value == 0:
break
errorcode = epalib.ENcloseH()
if errorcode != 0:
print(6, 'ERRORCODE is', errorcode)
errorcode = epalib.ENclose()
if errorcode != 0:
print(7, 'ERRORCODE is', errorcode)
input_list_continuous = []
input_list_categorical = []
input_tensor_continuous = np.empty([0,0], dtype=np.float64).flatten()
input_tensor_categorical = np.empty([0,0], dtype=np.float64).flatten()
for object in w_object_list:
list_continuous, list_categorical, tensor_continuous, tensor_categorical = object.convertToInputTensor()
input_list_continuous = input_list_continuous + list_continuous
input_list_categorical = input_list_categorical + list_categorical
input_tensor_continuous = np.concatenate((input_tensor_continuous, tensor_continuous), axis=0)
input_tensor_categorical = np.concatenate((input_tensor_categorical, tensor_categorical), axis=0)
output_list = []
output_tensor = np.empty([0,0], dtype=np.float64).flatten()
for object in w_object_list:
o_list, o_tensor = object.convertToOutputTensor()
output_list = output_list + o_list
output_tensor = np.concatenate((output_tensor, o_tensor), axis=0)
return input_list_continuous, input_list_categorical, output_list, input_tensor_continuous, input_tensor_categorical, output_tensor
def run_OpenDSS(dss_debug, solverFlag):
# SET SOURCEBUS
# VsourceClass.sourcebus = vsourceobj.id[1]
dssObj = win32com.client.Dispatch('OpenDSSEngine.DSS') # OPENDSS COMPORT
dssObj.AllowForms = False
dssText = dssObj.Text
dssCkt = dssObj.ActiveCircuit
dssSolution = dssCkt.Solution
dssActvElem = dssCkt.ActiveCktElement
dssActvBus = dssCkt.ActiveBus
dssText.Command = 'Clear'
dssText.Command = 'Set DataPath=\'C:\\Users\\'+os_username+'\\Documents\\OpenDSS'
dssText.Command = 'Set DefaultBaseFrequency=60'
for object in object_list:
object.createAllDSS(dssText, interconn_dict, dss_debug)
set_voltagebase = set()
for object in object_list:
set_voltagebase = set_voltagebase | object.voltagesToSets()
dssText.Command = 'Set VoltageBases={}'.format(list(set_voltagebase))
dssText.Command = 'CalcVoltageBases'
dssText.Command = 'Solve BaseFrequency=60 MaxIter=300'
variant_buses = automation.VARIANT()
variant_voltages_mag = automation.VARIANT()
variant_voltages_pu = automation.VARIANT()
variant_currents = automation.VARIANT()
variant_powers = automation.VARIANT()
for object in object_list:
object.readAllDSSOutputs(dssCkt, dssActvElem, dssActvBus, variant_buses, variant_voltages_mag, variant_voltages_pu, variant_currents, variant_powers)
if solverFlag == False:
# dssText.Command = 'Save Circuit'
# dssText.Command = 'Export Summary (summary.csv)'
# dssText.Command = 'Export Currents (currents.csv)'
# dssText.Command = 'Export Voltages (voltages.csv)'
# dssText.Command = 'Export Overloads (overloads.csv)'
# dssText.Command = 'Export Powers kVA (powers.csv)'
input_list_continuous = []
input_list_categorical = []
input_tensor_continuous = np.empty([0,0], dtype=np.float64).flatten()
input_tensor_categorical = np.empty([0,0], dtype=np.float64).flatten()
for object in object_list:
list_continuous, list_categorical, tensor_continuous, tensor_categorical = object.convertToInputTensor()
input_list_continuous = input_list_continuous + list_continuous
input_list_categorical = input_list_categorical + list_categorical
input_tensor_continuous = np.concatenate((input_tensor_continuous, tensor_continuous), axis=0)
input_tensor_categorical = np.concatenate((input_tensor_categorical, tensor_categorical), axis=0)
output_list = []
output_tensor = np.empty([0,0], dtype=np.float64).flatten()
for object in object_list:
o_list, o_tensor = object.convertToOutputTensor()
output_list = output_list + o_list
output_tensor = np.concatenate((output_tensor, o_tensor), axis=0)
return input_list_continuous, input_list_categorical, output_list, input_tensor_continuous, input_tensor_categorical, output_tensor
else:
losses = dssCkt.Losses
return float(losses[0])*0.001 # kW
# SIM STEP 1: SET LOAD CURVES
# ------------------------------
power_load_factor = plf
power_factor = 0.0
object_load.multiplyLoadFactor(power_load_factor, power_factor)
print('power load factor', power_load_factor)
# SIM STEP 2: SET GENERATOR DISPATCH
# ----------------------------------
exports = 0.0 # kW
losses = 0.0 # kW
def fun_set_power_dispatch(object_load, object_generator, losses, exports):
counter = 0
lost_min = 10000000.0
while True:
needreserves, actualreserves, reserves_dict = grb_solvers.unit_commitment_priority_list_n2(object_load, object_generator, losses, exports) # unit commitment is variable
new_loss = run_OpenDSS(0, True)
counter += 1
if math.fabs(losses - new_loss) > 1.0:
if counter > 199:
print('Dispatcher - Losses/Exports did not converge')
sys.exit()
elif counter > 150:
while True:
object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = dispatcher_max
needreserves, actualreserves, reserves_dict = grb_solvers.unit_commitment_priority_list_2_n2(object_load, object_generator, losses, exports) # unit commitment is input
new_loss = run_OpenDSS(0, True)
counter +=1
if math.fabs(losses - new_loss) < 1.0:
return needreserves, actualreserves, reserves_dict
else:
losses += 0.8 * (new_loss - losses)
elif counter > 100:
while True:
object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = dispatcher_min
needreserves, actualreserves, reserves_dict = grb_solvers.unit_commitment_priority_list_2_n2(object_load, object_generator, losses, exports) # unit commitment is input
new_loss = run_OpenDSS(0, True)
counter +=1
if math.fabs(losses - new_loss) < 1.0:
return needreserves, actualreserves, reserves_dict
else:
losses += 0.8 * (new_loss - losses)
elif counter > 50:
if math.fabs(new_loss) < math.fabs(lost_min):
lost_min = new_loss
dispatcher_min = np.array(object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS], copy=True)
else:
dispatcher_max = np.array(object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS], copy=True)
losses += 0.8*(new_loss - losses)
else:
return needreserves, actualreserves, reserves_dict
needed_reserves, actual_reserves, dict_reserves = fun_set_power_dispatch(object_load, object_generator, losses, exports)
print('exports #1', 0.5 * (object_cable.matrix[33, ODC.Cable.REAL_POWER_2] - object_cable.matrix[33, ODC.Cable.REAL_POWER_1]))
print('')
pre_contingency_branch_max = 0.0
for cable in object_cable.matrix:
if cable[ODC.Cable.ID] in [10.0, 100.0]:
pass
else:
if abs(cable[ODC.Cable.A_PU_CAPACITY]) > pre_contingency_branch_max:
pre_contingency_branch_max = abs(cable[ODC.Cable.A_PU_CAPACITY])
base_gen_commitment = np.array(object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS], copy=True)
base_gen_dispatch = np.array(object_generator.matrix[:, ODC.Generator.REAL_GENERATION], copy=True)
list_gen_mint = []
list_gen_error = []
list_gen_post_branch_load = []
list_gen_resp_branch_load = []
base_branch_commitment = np.array(object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A], copy=True)
list_branch_mint = []
list_branch_error = []
list_branch_post_branch_load = []
list_branch_resp_branch_load = []
print('Generators')
for row in object_generator.matrix:
object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = np.array(base_gen_commitment, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = np.array(base_gen_dispatch, copy=True)
if row[ODC.Generator.REAL_GENERATION] != 0.0:
row[ODC.Generator.REAL_GENERATION] = 0.0
row[ODC.Generator.OPERATIONAL_STATUS] = 0.0
for row2 in object_generator.matrix:
if row2[ODC.Generator.REAL_GENERATION] != 0.0 and row2[ODC.Generator.ID] in [118.0, 121.0, 323.0]:
print('GEN ID {} and {}'.format(int(row[ODC.Generator.ID]), int(row2[ODC.Generator.ID])))
object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = np.array(base_gen_commitment, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = np.array(base_gen_dispatch, copy=True)
row[ODC.Generator.REAL_GENERATION] = 0.0
row[ODC.Generator.OPERATIONAL_STATUS] = 0.0
row2[ODC.Generator.REAL_GENERATION] = 0.0
row2[ODC.Generator.OPERATIONAL_STATUS] = 0.0
run_OpenDSS(0, True)
branch_idx = 0
branch_max = 0.0
for idx in range(0, len(object_cable.matrix)):
if abs(object_cable.matrix[idx, ODC.Cable.A_PU_CAPACITY]) > branch_max:
branch_idx = idx
branch_max = abs(object_cable.matrix[idx, ODC.Cable.A_PU_CAPACITY])
# print(branch_idx)
# print(branch_max)
list_gen_post_branch_load.append(abs(object_cable.matrix[branch_idx, ODC.Cable.A_PU_CAPACITY]))
list_gen_mint.append(grb_solvers.contingency_response(object_load, object_generator, object_cable))
if list_gen_mint[-1] > 10.0:
print('GEN ID {}'.format(int(row[ODC.Generator.ID])))
print(list_gen_mint[-1])
print('')
run_OpenDSS(0, True)
list_gen_resp_branch_load.append(abs(object_cable.matrix[branch_idx, ODC.Cable.A_PU_CAPACITY]))
list_gen_error.append(0.5*(object_cable.matrix[34-1, ODC.Cable.REAL_POWER_2] - object_cable.matrix[34-1, ODC.Cable.REAL_POWER_1]))
print('Cables')
for row in object_cable.matrix:
object_generator.matrix[:, ODC.Generator.OPERATIONAL_STATUS] = np.array(base_gen_commitment, copy=True)
object_generator.matrix[:, ODC.Generator.REAL_GENERATION] = np.array(base_gen_dispatch, copy=True)
object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A] = np.array(base_branch_commitment, copy=True)
run_OpenDSS(0, True)
if row[ODC.Cable.ID] not in [10.0, 100.0]:
if row[ODC.Cable.OPERATIONAL_STATUS_A] == 1.0:
row[ODC.Cable.OPERATIONAL_STATUS_A] = 0.0
run_OpenDSS(0, True)
branch_idx = 0
branch_max = 0.0
for idx in range(0, len(object_cable.matrix)):
if abs(object_cable.matrix[idx, ODC.Cable.A_PU_CAPACITY]) > branch_max:
branch_idx = idx
branch_max = abs(object_cable.matrix[idx, ODC.Cable.A_PU_CAPACITY])
list_branch_post_branch_load.append(abs(object_cable.matrix[branch_idx, ODC.Cable.A_PU_CAPACITY]))
list_branch_mint.append(grb_solvers.contingency_response(object_load, object_generator, object_cable))
if list_branch_mint[-1] > 10.0:
print('BRANCH ID {}'.format(int(row[ODC.Cable.ID])))
print(list_branch_mint[-1])
print('')
run_OpenDSS(0, True)
list_branch_resp_branch_load.append(abs(object_cable.matrix[branch_idx, ODC.Cable.A_PU_CAPACITY]))
list_branch_error.append(0.5*(object_cable.matrix[34-1, ODC.Cable.REAL_POWER_2] - object_cable.matrix[34-1, ODC.Cable.REAL_POWER_1]))
object_cable.matrix[:, ODC.Cable.OPERATIONAL_STATUS_A] = np.array(base_branch_commitment, copy=True)
print('')
max_gen_mint = max(list_gen_mint)
med_gen_mint = median(list_gen_mint)
avg_gen_mint = sum(list_gen_mint) / len(list_gen_mint)
max_gen_error = max(max(list_gen_error), abs(min(list_gen_error)))
med_gen_error = median(list_gen_error)
avg_gen_error = sum(list_gen_error) / len(list_gen_error)
max_gen_branch_load = max(list_gen_post_branch_load)
max_gen_branch_idx = list_gen_post_branch_load.index(max_gen_branch_load)
max_gen_post_branch_load = list_gen_post_branch_load[max_gen_branch_idx]
max_gen_resp_branch_load = list_gen_resp_branch_load[max_gen_branch_idx]
max_branch_mint = max(list_branch_mint)
med_branch_mint = median(list_branch_mint)
avg_branch_mint = sum(list_branch_mint) / len(list_branch_mint)
max_branch_error = max(max(list_branch_error), abs(min(list_branch_error)))
med_branch_error = median(list_branch_error)
avg_branch_error = sum(list_branch_error) / len(list_branch_error)
max_branch_branch_load = max(list_branch_post_branch_load)
max_branch_branch_idx = list_branch_post_branch_load.index(max_branch_branch_load)
max_branch_post_branch_load = list_branch_post_branch_load[max_branch_branch_idx]
max_branch_resp_branch_load = list_branch_resp_branch_load[max_branch_branch_idx]
with open('reserves_n2.csv', 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([power_load_factor, needed_reserves, actual_reserves])
with open('gen_response_n2.csv', 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([power_load_factor, pre_contingency_branch_max, max_gen_post_branch_load, max_gen_resp_branch_load, max_gen_mint, med_gen_mint, avg_gen_mint, max_gen_error, med_gen_error, avg_gen_error])
with open('branch_response_n2.csv', 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([power_load_factor, pre_contingency_branch_max, max_branch_post_branch_load, max_branch_resp_branch_load, max_branch_mint, med_branch_mint, avg_branch_mint, max_branch_error, med_branch_error, avg_branch_error])
# counter = 0
# for row in object_generator.matrix:
# if row[ODC.Generator.REAL_GENERATION] != 0.0:
# if counter == 5: # 4 5 6 # 4 5 6
# # print('generator', row[ODC.Generator.ID], 'is offline!')
# row[ODC.Generator.REAL_GENERATION] = 0.0
# row[ODC.Generator.OPERATIONAL_STATUS] = 0.0
# break
# counter += 1
# counter = 0
# for row in object_cable.matrix:
# if row[ODC.Cable.ID] != 10.0 or row[ODC.Cable.ID] != 100.0:
# if counter == 17: # 12, 17
# print('cable', row[ODC.Cable.ID], 'is offline!')
# row[ODC.Cable.OPERATIONAL_STATUS_A] = 0.0
# break
# counter += 1
# counter = 0
# min_load_idx = 0
# min_load = 100000000.0
# second_min_load_idx = 0
# second_min_load = 0.
# for row in object_load.matrix:
# if row[ODC.Load.REAL_LOAD] < min_load:
# second_min_load_idx = min_load_idx
# second_min_load = min_load
# min_load = row[ODC.Load.REAL_LOAD]
# min_load_idx = counter
# counter += 1
# run_OpenDSS(0, True)
# print('load', object_load.matrix[min_load_idx, ODC.Load.ID], 'is offline!', object_load.matrix[min_load_idx, ODC.Load.REAL_LOAD])
# print('second min load is', object_load.matrix[second_min_load_idx, ODC.Load.ID], object_load.matrix[second_min_load_idx, ODC.Load.REAL_LOAD])
# object_load.matrix[min_load_idx, ODC.Load.REAL_LOAD] = 0.0
# branch_id_to_check = 34
# print('branch ID',object_cable.matrix[branch_id_to_check-1, ODC.Cable.ID],'has power',0.5*(object_cable.matrix[branch_id_to_check-1, ODC.Cable.REAL_POWER_2] - object_cable.matrix[branch_id_to_check-1, ODC.Cable.REAL_POWER_1]))
# print('max line load pt1', max(np.absolute(object_cable.matrix[:, ODC.Cable.A_PU_CAPACITY])))
# minutes_to_respond = grb_solvers.contingency_response(object_load, object_generator, object_cable)
# run_OpenDSS(0, True)
# print('branch',object_cable.matrix[branch_id_to_check-1, ODC.Cable.ID],'has power',0.5*(object_cable.matrix[branch_id_to_check-1, ODC.Cable.REAL_POWER_2] - object_cable.matrix[branch_id_to_check-1, ODC.Cable.REAL_POWER_1]))
# print('exports #2', 0.5 * (object_cable.matrix[33, ODC.Cable.REAL_POWER_2] - object_cable.matrix[33, ODC.Cable.REAL_POWER_1]))
# print('max line load pt2', max(np.absolute(object_cable.matrix[:, ODC.Cable.A_PU_CAPACITY])))
# print('')
# SIM STEP 3: RUN POWER-WATER SIMULATION
# --------------------------------------
# input_list_continuous, input_list_categorical, _, input_tensor_continuous, input_tensor_categorical, _ = run_OpenDSS(dss_debug, False)
# input_list_continuous1, input_list_categorical1, _, input_tensor_continuous1, input_tensor_categorical1, _ = run_EPANET()
# _, _, output_list, _, _, output_tensor = run_OpenDSS(dss_debug, False)
# _, _, output_list1, _, _, output_tensor1 = run_EPANET()
# RESULTS STEP 1: FORMAT INPUT/OUTPUT TENSORS
# -------------------------------------------
# input_list_continuous = input_list_continuous + input_list_continuous1
# input_list_categorical = input_list_categorical + input_list_categorical1
# output_list = output_list + output_list1
# input_tensor_continuous = np.concatenate((input_tensor_continuous, input_tensor_continuous1), axis=0)
# input_tensor_categorical = np.concatenate((input_tensor_categorical, input_tensor_categorical1), axis=0)
# output_tensor = np.concatenate((output_tensor, output_tensor1), axis=0)
# RESULTS STEP 2: WRITE INPUT/OUTPUT TENSORS TO FILE
# --------------------------------------------------
# if write_cols:
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_list_continuous_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(input_list_continuous)
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_list_categorical_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(input_list_categorical)
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/output_list_columns.csv', 'w') as f:
# writer = csv.writer(f, delimiter=',')
# writer.writerow(output_list)
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_tensor_continuous.csv', 'ab') as f:
# np.savetxt(f, input_tensor_continuous[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/input_tensor_categorical.csv', 'ab') as f:
# np.savetxt(f, input_tensor_categorical[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# with open('C:/Users/'+os_username+'/Documents/git/RISE-power-water-ss-1phase/tensor_outputs/output_tensor.csv', 'ab') as f:
# np.savetxt(f, output_tensor[None, :], fmt='%0.6f', delimiter=' ', newline='\n')
# END
# ---
if __name__ == '__main__':
write_cols = False # Write column names to seperate file
dss_debug = 0
power_load_factor = float(sys.argv[1])
main(dss_debug, write_cols, power_load_factor)
|
apache-2.0
|
smblance/ggplot
|
ggplot/tests/test_faceting.py
|
12
|
1894
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
import numpy as np
import pandas as pd
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 10),
"y": np.arange(0, 10),
"z": np.arange(0, 10),
"a": [1,1,1,1,1,2,2,2,3,3]
})
df['facets'] = np.where(df.x > 4, 'over', 'under')
df['facets2'] = np.where((df.x % 2) == 0, 'even', 'uneven')
return df
def _build_small_df():
return pd.DataFrame({
"x": [1, 2, 1, 2],
"y": [1, 2, 3, 4],
"a": ["a", "b", "a", "b"],
"b": ["c", "c", "d", "d"]
})
# faceting with bar plots does not work yet: see https://github.com/yhat/ggplot/issues/196
#@cleanup
#def test_facet_grid_descrete():
# df = _build_testing_df()
# gg = ggplot(aes(x='a'), data=df)
# assert_same_ggplot(gg + geom_bar() + facet_grid(x="facets", y="facets2"),
# "faceting_grid_descrete")
#@cleanup
#def test_facet_wrap_descrete():
# df = _build_testing_df()
# gg = ggplot(aes(x='a'), data=df)
# assert_same_ggplot(gg + geom_bar() + facet_wrap(x="facets"), "faceting_wrap_descrete")
@cleanup
def test_facet_grid_continous():
df = _build_testing_df()
p = ggplot(aes(x='x', y='y', colour='z'), data=df)
p = p + geom_point() + scale_colour_gradient(low="blue", high="red")
p = p + facet_grid("facets", "facets2")
assert_same_ggplot(p, "faceting_grid_continous")
@cleanup
def test_facet_wrap_continous():
df = _build_testing_df()
p = ggplot(aes(x='x', y='y', colour='z'), data=df)
p = p + geom_point() + scale_colour_gradient(low="blue", high="red")
p = p + facet_wrap("facets")
assert_same_ggplot(p, "faceting_wrap_continous")
|
bsd-2-clause
|
mjudsp/Tsallis
|
examples/plot_johnson_lindenstrauss_bound.py
|
67
|
7474
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
linebp/pandas
|
pandas/tests/indexes/datetimes/test_datetimelike.py
|
9
|
2526
|
""" generic tests from the Datetimelike class """
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import Series, Index, DatetimeIndex, date_range
from ..datetimelike import DatetimeLike
class TestDatetimeIndex(DatetimeLike):
_holder = DatetimeIndex
def setup_method(self, method):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
tm.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
assert tm.equalContents(result, second)
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
assert tm.equalContents(union, everything)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
assert tm.equalContents(result, everything)
|
bsd-3-clause
|
appapantula/scikit-learn
|
examples/decomposition/plot_image_denoising.py
|
181
|
5819
|
"""
=========================================
Image denoising using dictionary learning
=========================================
An example comparing reconstructions of noisy fragments of the Lena
image, using a dictionary fitted with online :ref:`DictionaryLearning`
and various transform (sparse coding) methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is to look
at the difference between the reconstruction and the original image. If the
reconstruction is perfect, this difference will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
|
bsd-3-clause
|
ElDeveloper/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
4
|
66251
|
"""Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
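# Illustrative sketch: the kernels defined below can be composed with the
# "+", "*" and "**" operators, and plain scalars are wrapped into a
# ConstantKernel automatically, e.g.
#     k = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
#     k.theta   # log-transformed free hyperparameters of the composite kernel
#     k.bounds  # log-transformed search bounds, shape (n_dims, 2)
# (RBF, WhiteKernel and ConstantKernel are defined later in this module.)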
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import inspect
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
                                 'n_elements', 'fixed'))):
    """A kernel hyperparameter's specification in the form of a namedtuple.
Entries
-------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
"""
    # A raw namedtuple is very memory efficient: it packs the attributes in a
    # struct, so there is no per-instance __dict__ and, in particular, the key
    # strings are not copied onto each instance.
    # Deriving from the namedtuple class just to add behaviour would
    # reintroduce a __dict__ on the instance unless we tell the Python
    # interpreter that this subclass uses static __slots__ instead of dynamic
    # attributes. We don't need any additional slot in the subclass, so
    # __slots__ is set to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
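# For illustration, a few ways the namedtuple above is typically constructed;
# bounds are broadcast to 2d and "fixed" defaults to whether the bounds are
# the string "fixed":
#     Hyperparameter("length_scale", "numeric", (1e-5, 1e5))                # scalar
#     Hyperparameter("length_scale", "numeric", (1e-5, 1e5), n_elements=3)  # vector-valued
#     Hyperparameter("length_scale", "numeric", "fixed")                    # excluded from tuning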
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
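# A small sketch of the theta/bounds conventions above: theta exposes the free
# hyperparameters in log-space, so for an RBF kernel (defined below) with
# length_scale=2.0 one gets approximately
#     RBF(length_scale=2.0).theta   # -> array([0.693...]) == np.log([2.0])
#     RBF(length_scale=2.0).bounds  # -> np.log([[1e-5, 1e5]])
# and assigning kernel.theta writes np.exp(theta) back into the attributes.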
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
        k_dims = self.kernels[0].n_dims  # all sub-kernels are assumed to share n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i*k_dims:(i+1)*k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
        Note that this compound kernel returns the results of all simple
        kernels stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
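# Illustrative use of CompoundKernel: the wrapped kernels are evaluated and
# stacked along a trailing axis (X here is any (n_samples, n_features) array):
#     ck = CompoundKernel([RBF(1.0), WhiteKernel(1.0)])
#     ck(X).shape       # -> (n_samples, n_samples, 2)
#     ck.diag(X).shape  # -> (n_samples, 2)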
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
    def hyperparameters(self):
        """Returns a list of all hyperparameters."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
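# A Sum is usually built with the "+" operator rather than instantiated
# directly; a minimal sketch (X is any (n_samples, n_features) array):
#     k = RBF(length_scale=1.0) + WhiteKernel(noise_level=0.5)
#     K, K_grad = k(X, eval_gradient=True)  # gradients of k1 and k2 dstacked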
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
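# A Product is usually built with the "*" operator; multiplying by a scalar
# wraps it into a ConstantKernel, e.g.
#     2.0 * RBF(length_scale=1.0)
# is Product(ConstantKernel(2.0), RBF(1.0)), whose diag(X) equals 2.0
# everywhere, i.e. the constant factor scales the magnitude of the RBF.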
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
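# Exponentiation is produced by the "**" operator, e.g.
#     k = RBF(length_scale=1.0) ** 2
# evaluates RBF(1.0)(X, Y) ** 2 entry-wise; for the RBF this is the same as
# using a length_scale divided by sqrt(2).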
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
self.length_scale = np.asarray(length_scale, dtype=np.float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
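# Quick sketch of the two variants described above:
#     RBF(length_scale=1.0)          # isotropic, one free hyperparameter
#     RBF(length_scale=[1.0, 10.0])  # anisotropic, one length-scale per feature
# In the anisotropic case X must have exactly len(length_scale) features.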
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0/3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
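# Quick sketch of the nu values discussed above:
#     Matern(length_scale=1.0, nu=0.5)  # absolute exponential kernel exp(-d)
#     Matern(length_scale=1.0, nu=1.5)  # once-differentiable sample paths
#     Matern(length_scale=1.0, nu=2.5)  # twice-differentiable sample paths
# Any other nu falls back to the (much slower) Bessel-function branch above.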
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
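# A worked value for the formula above: with length_scale=1, alpha=1 and a
# squared distance of 1, k = (1 + 1/2)**-1 = 2/3, e.g.
#     RationalQuadratic(length_scale=1.0, alpha=1.0)(np.array([[0.0], [1.0]]))
#     # -> approximately [[1.0, 0.667], [0.667, 1.0]]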
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
    parameter periodicity>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 * (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
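# A quick check of the periodicity: at a distance equal to `periodicity` the
# sine term vanishes and the kernel value is exactly 1, e.g.
#     k = ExpSineSquared(length_scale=1.0, periodicity=3.0)
#     k(np.array([[0.0]]), np.array([[3.0]]))  # -> [[1.0]] (up to rounding)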
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . .
. , D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
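# With sigma_0=0 this is the homogeneous linear kernel k(x_i, x_j) = x_i . x_j;
# combining it with exponentiation gives a polynomial-like kernel, e.g.
#     DotProduct(sigma_0=0.0) ** 2   # k(x_i, x_j) = (x_i . x_j) ** 2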
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
|
bsd-3-clause
|
oche-jay/vEQ-benchmark
|
data_analysis/vEQdata_analysis.py
|
1
|
14646
|
'''
Created on 15 Jun 2015
@author: ooe
'''
import os, time, datetime
import traceback
from database import vEQ_database as vqdb
import numpy
from util import getConfidence
import logging
import matplotlib.pyplot as plt
logging.getLogger().setLevel(logging.DEBUG)
PATH_TO_DB = '/Users/oche/vEQ-benhmark_i5/vEQ-benchmark/vEQ_db.sqlite'
PATH_TO_DB = '/Users/oche/vEQ-benhmark_PI/vEQ-benchmark/vEQ_db.sqlite'
# PATH_TO_DB = 'C:/Users/ooe/Documents/linux_vEQ_db.sqlite'
dbpath = os.path.abspath(PATH_TO_DB)
vEQdb = vqdb.vEQ_database(dbpath)
s =time.time()
def getMatchListFromTuple(movieList, tupleList, indexToRetrieve=1):
'''
Util function to get you the matching index for a list of titles
'''
matchingValues=[]
for title in movieList:
value = 0
for x in tupleList:
if title in x[0]:
value = x[indexToRetrieve]
# matchingValues.append(value)
break #go to next title
matchingValues.append(value)
return matchingValues
def plotMultiplePowerBarsForTitle(x0=None, x0_errs=None, x1=None, x1_errs=None, x2=None, x2_errs=None, **kwargs):
"""
    Plot up to three series on a bar chart.
    x0, x1, x2: the series to plot, with optional error bars
    x0_errs, x1_errs, x2_errs.
"""
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%H%M%S')
    xlabel = kwargs.get('xlabel', "Default X Label")
title = kwargs.get('title', "HD and UHD Power Usage")
filename = kwargs.get('filename', "")
    plt.rcParams['pdf.fonttype']=42 #to fix issue with weird percentage symbols on Mac OSX
N = len(x0)
ind = numpy.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
padding = 0.5
rects1 = ax.bar(padding+ind, x0, width, color='g', yerr=x0_errs, ecolor='b')
rects2 = ax.bar(padding+ind+width, x1, width, color='y', yerr=x1_errs, ecolor='b')
rects3 = ax.bar(padding+ind+width+width, x2, width, color='b', yerr=x2_errs, ecolor='b')
ax.set_ylim([40,120])
ax.set_xticks(padding+ind+width+(width/2))
ax.set_xticklabels(xlabel, rotation=45, ha='right')
ax.set_ylabel(r'System Power $- P_a$ (W)' );
ax.legend( (rects1[0], rects2[0], rects3[0]), ('720p', '1080p', '2160p') )
ax.grid(True)
plt.title(title)
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
#
# autolabel(rects1)
# autolabel(rects2)
plt.gcf().tight_layout()
plt.savefig("/Users/oche/Dropbox/Documents/Papers/ism2015/figures/benchmark-results-by-title-HD.eps")
plt.show()
def processPowerandCPUdata(x_values, summary_by, targets):
vcs = []
powers= []
power_confs = []
cpus = []
cpus_confs = []
targets_isNumerical = False
logging.debug("targets=%s" % targets)
d = {}
if targets:
try:
targets = map(int, targets)
            # trying to group target heights to accommodate intermediary heights
targets_isNumerical = True
logging.debug("targets is numerical")
except:
print "not an numerical array"
traceback.print_exc()
pass
finally:
# create a dict for grouped values
d = {vc : {"cpu": [],"pow":[]} for vc in targets}
else:
d = {vc : {"cpu": [],"pow":[]} for vc in (x[0] for x in x_values)}
"""ideally, xvalues is a summary of all distinct heights or codecs in the database (sorted)"""
for value in x_values:
vc = str(value[0])
if (vc == ("-1" or None or "NULL" or -1) ):
logging.warn("found %s for height - continue" % vc)
continue
oldvc = vc
if targets_isNumerical:
"""
            For numerical targets (values for the x axis), i.e. a list of numbers for heights, approximate the height of a video, e.g. 286 from the database,
            to a standard video height, e.g. 240. It is better to do this via a query than to change the data from the database,
            especially when it is time to do regression analyses.
            Note that targets may not always be numerical, e.g. a list of codecs or itags to plot on the x axis.
"""
# http://stackoverflow.com/questions/12141150/from-list-of-integers-get-number-closest-to-a-given-val
vc = min(targets, key=lambda x : abs(x-int(vc) ))
logging.info("old value, new value:%s, %s" % (oldvc, vc) )
print summary_by
for each_tuple in summary_by:
logging.debug(each_tuple)
            # this code isn't ideal as it has O(n x m) running time, where n is the length of the returned values;
            # it could be slightly less, even though it won't be ideal either, since n shrinks on every iteration
            seen = False  # note: never set to True, so the early-break branch below never fires
if str(oldvc) in str(each_tuple):
d[vc]['pow'].append(each_tuple[1])
d[vc]['cpu'].append(each_tuple[2])
else:
if seen:
break
else:
continue
if targets:
vcs = targets
else:
        vcs = [x[0] for x in x_values]
for vc in vcs:
power_tup=d[vc]['pow']
cpu_tup= d[vc]['cpu']
power_np = numpy.array(power_tup)
cpu_np = numpy.array(cpu_tup)
# vc = str(vc)[0:4]
# vcs.append(vc)
mp = power_np[power_np>0].mean()
powers.append(mp)
power_confs.append(getConfidence(power_np[power_np>0]))
cpus.append(cpu_np.mean())
cpus_confs.append(getConfidence(cpu_np))
return vcs, powers, power_confs, cpus, cpus_confs
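# Sketch of the height-snapping step used above (assumed, illustrative numbers):
# a measured height such as 286 from the database is mapped to the nearest
# target height before its power/CPU samples are grouped.
# >>> targets = [240, 360, 480, 720, 1080]
# >>> min(targets, key=lambda x: abs(x - 286))
# 240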
def plotPowerandCPU(x_values, summary_by, targets=[], **kwargs):
"""
X_values is a list or tuple of distinct values in the zeroth column for the x axis
    targets is a list of matches for values that you want to see in the plot; if not specified, everything will be shown
"""
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%H%M%S')
    xlabel = kwargs.get('xlabel', "Default X Label")
    title = kwargs.get('title', "Default Title")
    filename = kwargs.get('filename', "")
    idle_power = kwargs.get('idle_power', 2.6)
    plt.rcParams['pdf.fonttype']=42 #to fix issue with weird percentage symbols on Mac OSX
vcs = []
powers= []
power_confs = []
cpus = []
cpus_confs = []
# print summary_by
vcs, powers, power_confs, cpus, cpus_confs = processPowerandCPUdata(x_values, summary_by, targets)
ind = numpy.arange(len(vcs))
ind = ind+0.5 # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax1 = plt.subplots()
# plt.xticks(rotation=90
rects1 = ax1.bar(ind, powers, color='g', yerr=power_confs)
# ax1.set_ylim([40,120])
ax1.set_xlabel(xlabel)
ax1.set_ylabel(r'System Power $- P_a$ (W)' )
ax1.grid(True)
ax1.axhline(idle_power, color='blue', linewidth=2)
ax2 = ax1.twinx()
ax2.set_ylim([0,20])
ax2.plot(ind+0.4, cpus, color='r')
ax2.errorbar(ind+0.4, cpus, yerr=cpus_confs, color='r', ecolor='r', fmt='o')
ax2.set_ylabel('CPU(%)',color='r')
# You can specify a rotation for the tick labels in degrees or with keywords.
plt.xticks(ind+0.4, vcs)
# plt.setp(ax1[1].xaxis.get_majorticklabels(), )
#Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.25)
plt.title(title)
if not os.path.exists("plots"):
os.makedirs("plots")
plt.savefig(filename)
plt.show()
print time.time()-s
def plotBW(x_values, summary_by_codec, targets=[], **kwargs):
"""
    values is a list or tuple of distinct values in the zeroth column for the x axis
    targets is a list of matches for values that you want to see in the plot; if not specified, everything will be shown
"""
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%H%M%S')
    xlabel = kwargs.get('xlabel', "Default X Label")
    title = kwargs.get('title', "Default Title")
    filename = kwargs.get('filename', "")
    plt.rcParams['pdf.fonttype']=42 #to fix issue with weird percentage symbols on Mac OSX
vcs = []
powers= []
power_confs = []
cpus = []
cpus_confs = []
for vcodec in x_values:
vc = vcodec[0]
if not targets or ((targets) and (vc in targets)):
power_tup = []
cpu_tup = []
for each_tuple in summary_by_codec:
if vc in each_tuple:
val = each_tuple[1]
if val is not None:
power_tup.append(each_tuple[1])
cpu_tup.append(each_tuple[2])
# print vc, power_tup
power_np = numpy.array(power_tup)
cpu_np = numpy.array(cpu_tup)
vc = vc[0:4]
vcs.append(vc)
powers.append(power_np[power_np>0].mean())
power_confs.append(getConfidence(power_np[power_np>0]))
cpus.append(cpu_np.mean())
cpus_confs.append(getConfidence(cpu_np))
ind = numpy.arange(len(vcs))
ind = ind+0.5 # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax1 = plt.subplots()
# plt.xticks(rotation=90)
rects1 = ax1.bar(ind, powers, color='g', yerr=power_confs)
# ax1.set_ylim([40,120])
ax1.set_xlabel(xlabel)
ax1.set_ylabel('Bitrate (Mbps)')
ax1.grid(True)
# You can specify a rotation for the tick labels in degrees or with keywords.
plt.xticks(ind+0.4, vcs)
# plt.setp(ax1[1].xaxis.get_majorticklabels(), )
#Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.25)
plt.title(title)
if not os.path.exists("plots"):
os.makedirs("plots")
plt.savefig(filename)
plt.show()
print time.time()-s
def getConfbyTitleandHeightAbeg(movies_720,h):
    """Return a confidence interval for the recorded readings of each title at video height h."""
    confs = []
for title in movies_720:
# h = 1080
vals = vEQdb.getQuerybyNameandHeight(title, h)
# logging.debug("Title: %s, Value %s " % (title, vals))
if vals:
p = zip(*vals)[2]
# print p
np_ar = numpy.array(p)
np_ar = np_ar[np_ar>0]
confs.append(getConfidence(np_ar) )
else:
confs.append(0)
return confs
itags=["243 - 640x360 (DASH video)", "43 - 640x360", "243 - 640x360 (DASH video)",
"136 - 1280x640 (DASH video)", "244 - 854x480 (DASH video)",
"135 - 854x480 (DASH video)", "136 - 1280x720 (DASH video",
"247 - 1280x720 (DASH video)", "137 - 1920x1080 (DASH video)",
"248 - 1920x1080 (DASH video)", "264 - 2560x1440 (DASH video)",
"272 - 3840x2160 (DASH video)", "266 - 3840x2160 (DASH video)"
"138 - 3840x2160 (DASH video)", "313 - 3840x2160 (DASH video)" ]
# heights = ['320','480','720','1080','1440','2160']
# heights = ['240','360','480','720','1080', '1440', '2160']
heights = ['240','360','480','720','1080']
title1 = 'vEQ-benchmark - Summary results\n (Linux workstation, YouTube videos )\n'
title1 = 'vEQ-benchmark - Summary results\n (Windows workstation, YouTube videos )\n'
plat="-windows"
filename1 = "benchmark-results-by-height" + plat
filename2 = "benchmark-results-by-itags" + plat
vcodecs = vEQdb.getDistinctVideoCodecsfromDB()
summary_by_codec = vEQdb.getSummaryfromVeqDB()
vheights = vEQdb.getDistinctVideoHeightfromDB(min_height=0)
values_by_height = vEQdb.getSummaryfromVeqDBbyHeight(min_cpu=2,min_power=1,min_height=0)
vtitles = vEQdb.getDistinctColumnfromDB("video_name")
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
plotPowerandCPU(vheights, values_by_height, targets=heights, xlabel='Video height (Youtube)', title='System Power usage by rPI2' , idle_power=2.5, filename="raspeberyPI.png")
# xvalues = vEQdb.getDistinctColumnfromDBwithHeightFilter("video_name", height_filter=720)
# for v in xvalues:
# print v
# print
#
# x2values = vEQdb.getDistinctColumnfromDBwithHeightFilter("video_name", height_filter=1080)
# for v in x2values:
# print v
# print
#
# x3values = vEQdb.getDistinctColumnfromDBwithHeightFilter("video_name", height_filter=2160)
#
# for v in x3values:
# print v
# print
#
# movies_720 = zip(*xvalues)[0]
# powers_720 = list(zip(*xvalues)[1])
# cpus_720 = list(zip(*xvalues)[2])
#
# cpus_1080=[]
# powers_2160=[]
# cpus_2160=[]
#
# powers_1080 = getMatchListFromTuple(movies_720, x2values, 1)
# powers_2160 = getMatchListFromTuple(movies_720, x3values, 1)
#
# cpus_1080 = getMatchListFromTuple(movies_720, x2values, 2)
# cpus_2160 = getMatchListFromTuple(movies_720, x3values, 2)
#
# movie_labels = [item[0:16] for item in movies_720]
#
# power720errs = getConfbyTitleandHeightAbeg(movies_720, 720)
# power1080errs = getConfbyTitleandHeightAbeg(movies_720, 1080)
# power2160errs = getConfbyTitleandHeightAbeg(movies_720, 2160)
# print movie_labels
# print powers_720
# print powers_1080
# print powers_2160
# print power720errs, power1080errs, power1080errs
# plotPowerandCPU(vcodecs, summary_by_codec, targets=itags, xlabel='itags (YouTube)', title=title1, filename=filename2)
# plotPowerandCPU(vcodecs, summary_by_codec, targets=heights, xlabel='heights (YouTube)', title=title1, filename=filename2)
# print "vheight"
# for v in vheights:
# print "vheight: " + str(v)
# plotPowerandCPU(vheights, summary_by_heights, targets=heights, xlabel='Video height (Youtube)', title="Mean Bitrate for Youtube Videos", filename="kk.png")
# plotPowerandCPU(vtitles, summary_by_codec)
# plotMultiplePowerBarsForTitle(x0=powers_720, x0_errs=power720errs, x1=powers_1080, x1_errs=power1080errs, x2_errs=power2160errs, x2=powers_2160, xlabel=movie_labels)
|
gpl-2.0
|
Richert/BrainNetworks
|
RNNs/rnn_tryout.py
|
1
|
2801
|
from RNNs import QIFExpAddNoiseSyns
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter1d
# STEP 0: Define simulation condition
#####################################
# parse worker indices from script arguments
idx_cond = 570
# STEP 1: Load pre-generated RNN parameters
###########################################
path = "/home/rgast/PycharmProjects/BrainNetworks/RC/results"
config = pickle.load(open(f"{path}/qif_micro_config.pkl", 'rb'))
# connectivity matrix
C = config['C']
# input
inp = config['inp']
# input weights
W_in = config['W_in']
# simulation config
T = config['T']
dt = config['dt']
dts = config['dts']
cutoff = config['cutoff']
# target values
targets = config['targets']
# adaptation strength
alpha = 0.3 # config['alphas'][idx_cond]
# eta
eta = -0.3 # config['etas'][idx_cond]
# STEP 2: define remaining network parameters
#############################################
# general parameters
N = C.shape[0]
m = W_in.shape[0]
n_folds = 5
ridge_alpha = 1e-3
# qif parameters
Delta = 0.1
J = 10.0
D = 0.1
# STEP 3: Evaluate classification performance of RNN
####################################################
# setup QIF RNN
qif_rnn = QIFExpAddNoiseSyns(C, eta, J, Delta=Delta, alpha=alpha, D=D, tau_s=0.5)
# perform simulation
X = qif_rnn.run(T, dt, dts, inp=inp, W_in=W_in, state_record_key='t1', cutoff=cutoff)
# prepare training data
buffer_val = 0
for i in range(X.shape[1]):
X[:, i] = gaussian_filter1d(X[:, i], 1.0 / dts, mode='constant', cval=buffer_val)
y = targets
# split into test and training data
split = int(np.round(X.shape[0]*0.75, decimals=0))
X_train = X[:split, :]
y_train = y[:split]
X_test = X[split:, :]
y_test = y[split:]
# train RNN
key, scores, coefs = qif_rnn.ridge_fit(X=X_train, y=y_train, alpha=ridge_alpha, k=n_folds, fit_intercept=False, copy_X=True,
solver='lsqr')
score, _ = qif_rnn.test(X=X_test, y=y_test, readout_key=key)
y_predict = qif_rnn.predict(X=X, readout_key=key)
print(f"Classification performance on test data: {score}")
# plotting
fig, axes = plt.subplots(nrows=4)
ax1 = axes[0]
ax1.plot(np.mean(X, axis=1))
ax2 = axes[1]
im = ax2.imshow(X.T, aspect='auto', cmap="plasma", vmin=0, vmax=0.005)
#plt.colorbar(im, ax=ax2, shrink=0.5)
ax3 = axes[2]
ax3.plot(y)
ax3.plot(y_predict)
plt.legend(['target', 'output'])
ax4 = axes[3]
start = int(cutoff/dt)
ax4.plot(inp[0, start:])
ax4.plot(inp[1, start:])
plt.legend(['lorenz', 'stula'])
plt.tight_layout()
# plot connectivity
fig2, ax = plt.subplots()
im1 = ax.imshow(C, aspect='auto', cmap="plasma", vmin=0, vmax=np.max(C[:]))
plt.colorbar(im1, ax=ax, shrink=0.5)
plt.title('C')
plt.tight_layout()
print(f'Synaptic sparseness: {np.sum(C[:] == 0)/N**2}')
plt.show()
|
apache-2.0
|
xwolf12/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
276
|
3790
|
# Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
RobertABT/heightmap
|
build/matplotlib/lib/mpl_toolkits/mplot3d/proj3d.py
|
7
|
6832
|
#!/usr/bin/python
# 3dproj.py
#
"""
Various transforms used for by the 3D code
"""
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle
import numpy as np
import numpy.linalg as linalg
def line2d(p0, p1):
"""
Return 2D equation of line in the form ax+by+c = 0
"""
    # handle vertical (x = x1) and horizontal (y = y1) lines as special cases
x0, y0 = p0[:2]
x1, y1 = p1[:2]
#
if x0 == x1:
a = -1
b = 0
c = x1
elif y0 == y1:
a = 0
b = 1
c = -y1
else:
        # normal vector (a, b) perpendicular to the direction (x1-x0, y1-y0)
        a = (y0-y1)
        b = (x1-x0)
        c = (x0*y1 - x1*y0)
return a, b, c
def line2d_dist(l, p):
"""
Distance from line to point
line is a tuple of coefficients a,b,c
"""
a, b, c = l
x0, y0 = p
return abs((a*x0 + b*y0 + c)/np.sqrt(a**2+b**2))
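# Worked sketch (assumed points): the line through (0, 0) and (1, 1) comes back
# as a*x + b*y + c = 0 with (a, b, c) = (-1, 1, 0), i.e. y = x, and the point
# (1, 0) lies at distance 1/sqrt(2) from it.
# >>> l = line2d((0, 0), (1, 1))
# >>> round(line2d_dist(l, (1, 0)), 3)
# 0.707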
def line2d_seg_dist(p1, p2, p0):
"""distance(s) from line defined by p1 - p2 to point(s) p0
p0[0] = x(s)
p0[1] = y(s)
intersection point p = p1 + u*(p2-p1)
and intersection point lies within segment if u is between 0 and 1
"""
x21 = p2[0] - p1[0]
y21 = p2[1] - p1[1]
x01 = np.asarray(p0[0]) - p1[0]
y01 = np.asarray(p0[1]) - p1[1]
u = (x01*x21 + y01*y21)/float(abs(x21**2 + y21**2))
u = np.clip(u, 0, 1)
d = np.sqrt((x01 - u*x21)**2 + (y01 - u*y21)**2)
return d
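# Quick sketch (assumed points): the distance from the segment (0, 0)-(10, 0)
# to a point above its middle is the plain perpendicular distance, while a
# point beyond the segment end is measured to the nearest endpoint because u
# is clipped to [0, 1].
# >>> line2d_seg_dist((0, 0), (10, 0), (5, 3))
# 3.0
# >>> line2d_seg_dist((0, 0), (10, 0), (13, 4))
# 5.0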
def test_lines_dists():
import pylab
ax = pylab.gca()
xs, ys = (0,30), (20,150)
pylab.plot(xs, ys)
points = zip(xs, ys)
p0, p1 = points
xs, ys = (0,0,20,30), (100,150,30,200)
pylab.scatter(xs, ys)
dist = line2d_seg_dist(p0, p1, (xs[0], ys[0]))
dist = line2d_seg_dist(p0, p1, np.array((xs, ys)))
for x, y, d in zip(xs, ys, dist):
c = Circle((x, y), d, fill=0)
ax.add_patch(c)
pylab.xlim(-200, 200)
pylab.ylim(-200, 200)
pylab.show()
def mod(v):
"""3d vector length"""
return np.sqrt(v[0]**2+v[1]**2+v[2]**2)
def world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax):
dx, dy, dz = (xmax-xmin), (ymax-ymin), (zmax-zmin)
return np.array([
[1.0/dx,0,0,-xmin/dx],
[0,1.0/dy,0,-ymin/dy],
[0,0,1.0/dz,-zmin/dz],
[0,0,0,1.0]])
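# Sketch (assumed bounds): the world matrix maps the axis-aligned box
# [0, 2] x [0, 4] x [0, 8] onto the unit cube, so the far corner (2, 4, 8)
# lands at (1, 1, 1) in homogeneous coordinates.
# >>> M = world_transformation(0, 2, 0, 4, 0, 8)
# >>> np.dot(M, [2, 4, 8, 1])   # -> array of [1., 1., 1., 1.]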
def test_world():
xmin, xmax = 100, 120
ymin, ymax = -100, 100
zmin, zmax = 0.1, 0.2
M = world_transformation(xmin, xmax, ymin, ymax, zmin, zmax)
print M
def view_transformation(E, R, V):
n = (E - R)
## new
# n /= mod(n)
# u = np.cross(V,n)
# u /= mod(u)
# v = np.cross(n,u)
# Mr = np.diag([1.]*4)
# Mt = np.diag([1.]*4)
# Mr[:3,:3] = u,v,n
# Mt[:3,-1] = -E
## end new
## old
n = n / mod(n)
u = np.cross(V, n)
u = u / mod(u)
v = np.cross(n, u)
Mr = [[u[0],u[1],u[2],0],
[v[0],v[1],v[2],0],
[n[0],n[1],n[2],0],
[0, 0, 0, 1],
]
#
Mt = [[1, 0, 0, -E[0]],
[0, 1, 0, -E[1]],
[0, 0, 1, -E[2]],
[0, 0, 0, 1]]
## end old
return np.dot(Mr, Mt)
def persp_transformation(zfront, zback):
a = (zfront+zback)/(zfront-zback)
b = -2*(zfront*zback)/(zfront-zback)
return np.array([[1,0,0,0],
[0,1,0,0],
[0,0,a,b],
[0,0,-1,0]
])
def proj_transform_vec(vec, M):
vecw = np.dot(M, vec)
w = vecw[3]
# clip here..
txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
return txs, tys, tzs
def proj_transform_vec_clip(vec, M):
vecw = np.dot(M, vec)
w = vecw[3]
# clip here..
txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
tis = (vecw[0] >= 0) * (vecw[0] <= 1) * (vecw[1] >= 0) * (vecw[1] <= 1)
if np.sometrue(tis):
tis = vecw[1] < 1
return txs, tys, tzs, tis
def inv_transform(xs, ys, zs, M):
iM = linalg.inv(M)
vec = vec_pad_ones(xs, ys, zs)
vecr = np.dot(iM, vec)
try:
vecr = vecr/vecr[3]
except OverflowError:
pass
return vecr[0], vecr[1], vecr[2]
def vec_pad_ones(xs, ys, zs):
try:
try:
vec = np.array([xs,ys,zs,np.ones(xs.shape)])
except (AttributeError,TypeError):
vec = np.array([xs,ys,zs,np.ones((len(xs)))])
except TypeError:
vec = np.array([xs,ys,zs,1])
return vec
def proj_transform(xs, ys, zs, M):
"""
Transform the points by the projection matrix
"""
vec = vec_pad_ones(xs, ys, zs)
return proj_transform_vec(vec, M)
def proj_transform_clip(xs, ys, zs, M):
"""
Transform the points by the projection matrix
and return the clipping result
returns txs,tys,tzs,tis
"""
vec = vec_pad_ones(xs, ys, zs)
return proj_transform_vec_clip(vec, M)
transform = proj_transform
def proj_points(points, M):
return zip(*proj_trans_points(points, M))
def proj_trans_points(points, M):
xs, ys, zs = zip(*points)
return proj_transform(xs, ys, zs, M)
def proj_trans_clip_points(points, M):
xs, ys, zs = zip(*points)
return proj_transform_clip(xs, ys, zs, M)
def test_proj_draw_axes(M, s=1):
import pylab
xs, ys, zs = [0, s, 0, 0], [0, 0, s, 0], [0, 0, 0, s]
txs, tys, tzs = proj_transform(xs, ys, zs, M)
o, ax, ay, az = (txs[0], tys[0]), (txs[1], tys[1]), \
(txs[2], tys[2]), (txs[3], tys[3])
lines = [(o, ax), (o, ay), (o, az)]
ax = pylab.gca()
linec = LineCollection(lines)
ax.add_collection(linec)
for x, y, t in zip(txs, tys, ['o', 'x', 'y', 'z']):
pylab.text(x, y, t)
def test_proj_make_M(E=None):
# eye point
E = E or np.array([1, -1, 2]) * 1000
#E = np.array([20,10,20])
R = np.array([1, 1, 1]) * 100
V = np.array([0, 0, 1])
viewM = view_transformation(E, R, V)
perspM = persp_transformation(100, -100)
M = np.dot(perspM, viewM)
return M
def test_proj():
import pylab
M = test_proj_make_M()
ts = ['%d' % i for i in [0,1,2,3,0,4,5,6,7,4]]
xs, ys, zs = [0,1,1,0,0, 0,1,1,0,0], [0,0,1,1,0, 0,0,1,1,0], \
[0,0,0,0,0, 1,1,1,1,1]
xs, ys, zs = [np.array(v)*300 for v in (xs, ys, zs)]
#
test_proj_draw_axes(M, s=400)
txs, tys, tzs = proj_transform(xs, ys, zs, M)
ixs, iys, izs = inv_transform(txs, tys, tzs, M)
pylab.scatter(txs, tys, c=tzs)
pylab.plot(txs, tys, c='r')
for x, y, t in zip(txs, tys, ts):
pylab.text(x, y, t)
pylab.xlim(-0.2, 0.2)
pylab.ylim(-0.2, 0.2)
pylab.show()
def rot_x(V, alpha):
cosa, sina = np.cos(alpha), np.sin(alpha)
M1 = np.array([[1,0,0,0],
[0,cosa,-sina,0],
[0,sina,cosa,0],
                   [0,0,0,1]])
return np.dot(M1, V)
def test_rot():
V = [1,0,0,1]
print rot_x(V, np.pi/6)
V = [0,1,0,1]
print rot_x(V, np.pi/6)
if __name__ == "__main__":
test_proj()
|
mit
|
dancingdan/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/io_test.py
|
137
|
5063
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
jaredwo/obsio
|
obsio/providers/hdf.py
|
1
|
1871
|
from .generic import ObsIO
import numpy as np
import pandas as pd
class HdfObsIO(ObsIO):
"""ObsIO to read observations from a local HDF5 store created by ObsIO.to_hdf
"""
def __init__(self, fpath):
"""
Parameters
----------
fpath : str
The local file path of the HDF5 store
"""
self.store = pd.HDFStore(fpath)
attrs = self.store.get_storer('stns').attrs
self.elems = attrs.elems
self.start_date = attrs.start_date
self.end_date = attrs.end_date
self.bbox = attrs.bbox
self.name = attrs.name
self._stns = None
def _read_stns(self):
return self.store.select('stns')
def _read_obs(self, stns_ids=None):
if stns_ids is None:
obs = self.store.select('obs')
else:
obs = []
# HDFStore can only read in chunks of 31
stn_chk = 31
for i in np.arange(len(stns_ids), step=stn_chk):
stnids = stns_ids[i:(i+stn_chk)]
obs_chk = self.store.select('obs', 'index=stnids')
obs.append(obs_chk)
obs = pd.concat(obs)
obs = obs.set_index('time', append=True).stack()
obs.name = 'obs_value'
obs.index.rename('elem', level=2, inplace=True)
obs = obs.reorder_levels(['station_id', 'elem',
'time']).sortlevel(0, sort_remaining=True)
obs = pd.DataFrame(obs)
return obs
def close(self):
self.store.close()
self.store = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
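# Usage sketch (hypothetical file path; illustrative only): the context-manager
# protocol above guarantees the underlying HDFStore is closed. The private
# readers are called directly here just for illustration; the ObsIO base class
# is assumed to expose the usual public accessors on top of them.
#
# with HdfObsIO('/path/to/obs_store.hdf') as aio:
#     stns = aio._read_stns()
#     obs = aio._read_obs(stns_ids=list(stns.index[:5]))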
|
gpl-3.0
|
kristoforcarlson/nest-simulator-fork
|
pynest/nest/raster_plot.py
|
12
|
6855
|
# -*- coding: utf-8 -*-
#
# raster_plot.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import numpy
import pylab
def extract_events(data, time=None, sel=None):
"""
    Extracts all events within a given time interval and/or from a
    given set of neurons.
- data is a matrix such that
data[:,0] is a vector of all gids and
data[:,1] a vector with the corresponding time stamps.
- time is a list with at most two entries such that
time=[t_max] extracts all events with t< t_max
time=[t_min, t_max] extracts all events with t_min <= t < t_max
- sel is a list of gids such that
sel=[gid1, ... , gidn] extracts all events from these gids.
All others are discarded.
Both time and sel may be used at the same time such that all
events are extracted for which both conditions are true.
"""
val = []
if time:
t_max = time[-1]
if len(time) > 1:
t_min = time[0]
else:
t_min = 0
for v in data:
t = v[1]
gid = v[0]
if time and (t < t_min or t >= t_max):
continue
if not sel or gid in sel:
val.append(v)
return numpy.array(val)
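# Minimal sketch (made-up spike data): keep only events from GID 1 that fall in
# the half-open interval [10, 20).
# >>> data = numpy.array([[1, 5.0], [1, 12.0], [2, 15.0], [1, 25.0]])
# >>> extract_events(data, time=[10, 20], sel=[1])   # -> array([[ 1., 12.]])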
def from_data(data, title=None, hist=False, hist_binwidth=5.0, grayscale=False, sel=None):
"""
Plot raster from data array
"""
ts = data[:, 1]
d = extract_events(data, sel=sel)
ts1 = d[:, 1]
gids = d[:, 0]
return _make_plot(ts, ts1, gids, data[:, 0], hist, hist_binwidth, grayscale, title)
def from_file(fname, title=None, hist=False, hist_binwidth=5.0, grayscale=False):
"""
Plot raster from file
"""
if nest.is_iterable(fname):
data = None
for f in fname:
if data is None:
data = numpy.loadtxt(f)
else:
data = numpy.concatenate((data, numpy.loadtxt(f)))
else:
data = numpy.loadtxt(fname)
return from_data(data, title, hist, hist_binwidth, grayscale)
def from_device(detec, title=None, hist=False, hist_binwidth=5.0, grayscale=False, plot_lid=False):
"""
Plot raster from spike detector
"""
if not nest.GetStatus(detec)[0]["model"] == "spike_detector":
raise nest.NESTError("Please provide a spike_detector.")
if nest.GetStatus(detec, "to_memory")[0]:
ts, gids = _from_memory(detec)
if not len(ts):
raise nest.NESTError("No events recorded!")
if plot_lid:
gids = [nest.GetLID([x]) for x in gids]
if title is None:
title = "Raster plot from device '%i'" % detec[0]
if nest.GetStatus(detec)[0]["time_in_steps"]:
xlabel = "Steps"
else:
xlabel = "Time (ms)"
return _make_plot(ts, ts, gids, gids, hist, hist_binwidth, grayscale, title, xlabel)
elif nest.GetStatus(detec, "to_file")[0]:
fname = nest.GetStatus(detec, "filenames")[0]
return from_file(fname, title, hist, hist_binwidth, grayscale)
else:
raise nest.NESTError("No data to plot. Make sure that either to_memory or to_file are set.")
def _from_memory(detec):
ev = nest.GetStatus(detec, "events")[0]
return ev["times"], ev["senders"]
def _make_plot(ts, ts1, gids, neurons, hist, hist_binwidth, grayscale, title, xlabel=None):
"""
Generic plotting routine that constructs a raster plot along with
an optional histogram (common part in all routines above)
"""
pylab.figure()
if grayscale:
color_marker = ".k"
color_bar = "gray"
else:
color_marker = "."
color_bar = "blue"
color_edge = "black"
if xlabel is None:
xlabel = "Time (ms)"
ylabel = "Neuron ID"
if hist:
ax1 = pylab.axes([0.1, 0.3, 0.85, 0.6])
plotid = pylab.plot(ts1, gids, color_marker)
pylab.ylabel(ylabel)
pylab.xticks([])
xlim = pylab.xlim()
pylab.axes([0.1, 0.1, 0.85, 0.17])
t_bins = numpy.arange(numpy.amin(ts), numpy.amax(ts), float(hist_binwidth))
n, bins = _histogram(ts, bins=t_bins)
num_neurons = len(numpy.unique(neurons))
heights = 1000 * n / (hist_binwidth * num_neurons)
pylab.bar(t_bins, heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge)
pylab.yticks([int(x) for x in numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)])
pylab.ylabel("Rate (Hz)")
pylab.xlabel(xlabel)
pylab.xlim(xlim)
pylab.axes(ax1)
else:
plotid = pylab.plot(ts1, gids, color_marker)
pylab.xlabel(xlabel)
pylab.ylabel(ylabel)
if title is None:
pylab.title("Raster plot")
else:
pylab.title(title)
pylab.draw()
return plotid
def _histogram(a, bins=10, bin_range=None, normed=False):
from numpy import asarray, iterable, linspace, sort, concatenate
a = asarray(a).ravel()
if bin_range is not None:
mn, mx = bin_range
if mn > mx:
raise ValueError("max must be larger than min in range parameter")
if not iterable(bins):
if bin_range is None:
bin_range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in bin_range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins, endpoint=False)
else:
if (bins[1:] - bins[:-1] < 0).any():
raise ValueError("bins must increase monotonically")
# best block size probably depends on processor cache size
block = 65536
n = sort(a[:block]).searchsorted(bins)
for i in range(block, a.size, block):
n += sort(a[i:i + block]).searchsorted(bins)
n = concatenate([n, [len(a)]])
n = n[1:] - n[:-1]
if normed:
db = bins[1] - bins[0]
return 1.0 / (a.size * db) * n, bins
else:
return n, bins
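# Sketch (assumed spike times): for evenly spaced bins _histogram matches
# numpy.histogram's counts; here 3 spikes fall into [0, 5) and 1 into [5, 10).
# >>> counts, edges = _histogram([1.0, 2.0, 2.5, 7.0], bins=2, bin_range=(0.0, 10.0))
# >>> list(counts)
# [3, 1]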
def show():
"""
Call pylab.show() to show all figures and enter the GUI main loop.
Python will block until all figure windows are closed again.
You should call this function only once at the end of a script.
See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
"""
pylab.show()
|
gpl-2.0
|
adamobeng/ibis
|
scripts/airline.py
|
9
|
1268
|
import ibis
import os
import pandas
def wrangle_csvs():
years = range(1987, 2009)
for year in years:
path = '%d.csv.bz2' % year
outpath = os.path.expanduser('~/data/%d_clean.csv' % year)
print 'Working on %s' % path
df = pandas.read_csv(path, compression='bz2')
df.to_csv(outpath, header=False, index=False,
float_format='%g', na_rep='\N')
schema = ibis.schema([
('year', 'int32'),
('month', 'int8'),
('day', 'int8'),
('dayofweek', 'int8'),
('dep_time', 'int32'),
('crs_dep_time', 'int32'),
('arr_time', 'int32'),
('crs_arr_time', 'int32'),
('carrier', 'string'),
('flight_num', 'int32'),
('tail_num', 'int32'),
('actual_elapsed_time', 'int32'),
('crs_elapsed_time', 'int32'),
('airtime', 'int32'),
('arrdelay', 'int32'),
('depdelay', 'int32'),
('origin', 'string'),
('dest', 'string'),
('distance', 'int32'),
('taxi_in', 'int32'),
('taxi_out', 'int32'),
('cancelled', 'int8'),
('cancellation_code', 'string'),
('diverted', 'int8'),
('carrier_delay', 'int32'),
('weather_delay', 'int32'),
('nas_delay', 'int32'),
('security_delay', 'int32'),
('late_aircraft_delay', 'int32')
])
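# Sketch (hypothetical cleaned-file path): the cleaned CSVs above are written
# without a header row, so the schema defines the column order when the files
# are read back; a quick sanity check might look like the following, assuming
# the upstream airline files carry one column per schema field.
#
# df = pandas.read_csv(os.path.expanduser('~/data/1987_clean.csv'), header=None)
# assert df.shape[1] == 29   # one column per field in the schema above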
|
apache-2.0
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/pandas/io/json/json.py
|
6
|
24553
|
# pylint: disable-msg=E1101,W0613,W0603
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslib import iNaT
from pandas.compat import StringIO, long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime, MultiIndex
from pandas.io.common import get_filepath_or_buffer, _get_handle
from pandas.core.common import AbstractMethodError
from pandas.io.formats.printing import pprint_thing
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema
from pandas.core.dtypes.common import is_period_dtype
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = '0.20.0'
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False):
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if orient == 'table' and isinstance(obj, Series):
obj = obj.to_frame(name=obj.name or 'values')
if orient == 'table' and isinstance(obj, DataFrame):
writer = JSONTableWriter
elif isinstance(obj, Series):
writer = SeriesWriter
elif isinstance(obj, DataFrame):
writer = FrameWriter
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
s = writer(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
fh.write(s)
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
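# Sketch (assumed frame; output shown approximately): with orient='records' and
# lines=True each row becomes its own JSON document via
# _convert_to_line_delimits.
# >>> to_json(None, DataFrame({'a': [1, 2]}), orient='records', lines=True)
# '{"a":1}\n{"a":2}'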
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler
)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'%s'." % self.orient)
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
class JSONTableWriter(FrameWriter):
_default_orient = 'records'
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
"""
        Adds a `schema` attribute with the Table Schema, resets
        the index (can't do in caller, because the schema inference needs
        to know what the index is), forces orient to records, and forces
        date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
obj, orient, date_format, double_precision, ensure_ascii,
date_unit, default_handler=default_handler)
if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
"`date_format='%s'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`" % date_format)
raise ValueError(msg)
self.schema = build_table_schema(obj)
        # NotImplemented on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
raise NotImplementedError(
"orient='table' is not supported for MultiIndex")
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
len(obj.columns & obj.index.names)):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=['timedelta']).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(
lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
self.obj = obj.reset_index()
self.date_format = 'iso'
self.orient = 'records'
def write(self):
data = super(JSONTableWriter, self).write()
serialized = '{{"schema": {}, "data": {}}}'.format(
dumps(self.schema), data)
return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False):
"""
Convert a JSON string to pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf,
encoding=encoding)
if isinstance(filepath_or_buffer, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# if the filepath is too long will raise here
# 5874
except (TypeError, ValueError):
exists = False
if exists:
fh, handles = _get_handle(filepath_or_buffer, 'r',
encoding=encoding)
json = fh.read()
fh.close()
else:
json = filepath_or_buffer
elif hasattr(filepath_or_buffer, 'read'):
json = filepath_or_buffer.read()
else:
json = filepath_or_buffer
if lines:
# If given a json lines file, we break the string into lines, add
# commas and put it in a json list to make a valid json object.
lines = list(StringIO(json.strip()))
json = '[' + ','.join(lines) + ']'
obj = None
if typ == 'frame':
obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
dtype = dict(data=dtype)
obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
return obj
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of %s' %
(self._STAMP_UNITS,))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): %s") %
pprint_thing(bad_keys))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except:
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except:
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except:
pass
        # don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except:
pass
# coerce ints to 64
if data.dtype == 'int':
# coerce floats to 64
try:
data = data.astype('int64')
result = True
except:
pass
return data, result
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
        integer/float in epoch formats, return a boolean if parsing
was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except:
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except ValueError:
continue
except:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if args:
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
""" take a conversion function and possibly recreate the frame """
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col)) or
col in convert_dates))
|
agpl-3.0
|
iproduct/course-social-robotics
|
11-dnn-keras/venv/Lib/site-packages/pandas/tests/indexes/numeric/test_astype.py
|
5
|
2943
|
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import pandas_dtype
from pandas import Float64Index, Index, Int64Index
import pandas._testing as tm
class TestAstype:
def test_astype_float64_to_object(self):
float_index = Float64Index([0.0, 2.5, 5.0, 7.5, 10.0])
result = float_index.astype(object)
assert result.equals(float_index)
assert float_index.equals(result)
assert isinstance(result, Index) and not isinstance(result, Float64Index)
def test_astype_float64_mixed_to_object(self):
# mixed int-float
idx = Float64Index([1.5, 2, 3, 4, 5])
idx.name = "foo"
result = idx.astype(object)
assert result.equals(idx)
assert idx.equals(result)
assert isinstance(result, Index) and not isinstance(result, Float64Index)
@pytest.mark.parametrize("dtype", ["int16", "int32", "int64"])
def test_astype_float64_to_int_dtype(self, dtype):
# GH#12881
# a float astype int
idx = Float64Index([0, 1, 2])
result = idx.astype(dtype)
expected = Int64Index([0, 1, 2])
tm.assert_index_equal(result, expected)
idx = Float64Index([0, 1.1, 2])
result = idx.astype(dtype)
expected = Int64Index([0, 1, 2])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_astype_float64_to_float_dtype(self, dtype):
# GH#12881
# a float astype int
idx = Float64Index([0, 1, 2])
result = idx.astype(dtype)
expected = idx
tm.assert_index_equal(result, expected)
idx = Float64Index([0, 1.1, 2])
result = idx.astype(dtype)
expected = Index(idx.values.astype(dtype))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
def test_cannot_cast_to_datetimelike(self, dtype):
idx = Float64Index([0, 1.1, 2])
msg = (
f"Cannot convert Float64Index to dtype {pandas_dtype(dtype)}; "
f"integer values are required for conversion"
)
with pytest.raises(TypeError, match=re.escape(msg)):
idx.astype(dtype)
@pytest.mark.parametrize("dtype", [int, "int16", "int32", "int64"])
@pytest.mark.parametrize("non_finite", [np.inf, np.nan])
def test_cannot_cast_inf_to_int(self, non_finite, dtype):
# GH#13149
idx = Float64Index([1, 2, non_finite])
msg = r"Cannot convert non-finite values \(NA or inf\) to integer"
with pytest.raises(ValueError, match=msg):
idx.astype(dtype)
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype="object")
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
assert result.dtype == expected.dtype
tm.assert_index_equal(result, expected)
|
gpl-2.0
|
fabioticconi/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
181
|
15664
|
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
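# --- illustrative sketch (editor's addition, not part of the original tests) --
# A minimal, hedged example of the behaviour exercised above: with n_redundant
# and n_repeated set to 0, every column of X is an informative feature and the
# class clusters sit near hypercube vertices scaled by class_sep.  The helper
# name below is hypothetical and is deliberately not prefixed with "test_" so
# the test runner does not collect it.
def _example_informative_features_sketch():
    X, y = make_classification(n_samples=200, n_features=2, n_informative=2,
                               n_redundant=0, n_repeated=0, n_classes=2,
                               n_clusters_per_class=1, class_sep=10.0,
                               flip_y=0, shuffle=False, random_state=0)
    # Each class centroid should sit far from the origin, on the order of
    # class_sep, because the clusters are placed at hypercube vertices.
    centroids = np.array([X[y == k].mean(axis=0) for k in np.unique(y)])
    return X, y, centroids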
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
|
bsd-3-clause
|
apark263/tensorflow
|
tensorflow/contrib/factorization/python/ops/gmm_test.py
|
41
|
8716
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
                                          minval=0, maxval=num_points,  # maxval is exclusive
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
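# --- illustrative sketch (editor's addition, not part of the original test) ---
# A minimal, hedged sketch of the ordering constraint described above: the
# QueueRunner that feeds the input must be started in a session before anything
# that pulls from the queue (such as cluster initialization) is evaluated, e.g.
# via tf.train.start_queue_runners.  The helper name below is hypothetical and
# is never called by the tests; it only builds the graph pieces.
def _queue_input_sketch():
    queue = data_flow_ops.FIFOQueue(capacity=4,
                                    dtypes=dtypes.float32,
                                    shapes=[10, 3])
    enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
    # Register the runner; it only produces data once started in a session.
    queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, [enqueue_op]))
    return queue.dequeue()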
if __name__ == '__main__':
test.main()
|
apache-2.0
|
jeffschulte/protein
|
old/vfield.py
|
2
|
2913
|
from __future__ import division
from numpy import *
from matplotlib import *
from pylab import *
import sys
import glob
from matplotlib.widgets import Slider, RadioButtons
from file_loader import *
#NEEDS UPDATE TO NEW FILE LOAD
def gradient(data):
if len(data.shape) == 2:
gradStore = [[(0.,0.) for y in range((data_shape[0]-2))] for z in range((data_shape[1]-2))]
for i in range(2,data_shape[0]-2):
for j in range(2,data_shape[1]-2):
partial_x1 = (data[i][j+1] - data[i][j-1])/(2*dx)
partial_x2 = (data[i+1][j] - data[i-1][j])/(2*dx)
gradStore[i][j] = (partial_x1, partial_x2)
else:
gradStore = [[[(0.,0.,0.) for x in range((data_shape[0]-2))] for y in range((data_shape[1]-2))] for z in range((data_shape[2]-2))]
for i in range(2,data_shape[0]-2):
for j in range(2,data_shape[1]-2):
for k in range(2,data_shape[0]-2):
partial_x1 = (data[i][j][k+1] - data[i][j][k-1])/(2*dx)
partial_x2 = (data[i][j+1][k] - data[i][j-1][k])/(2*dx)
partial_x3 = (data[i+1][j][k] - data[i-1][j][k])/(2*dx)
gradStore[i][j][k] = (partial_x1, partial_x2, partial_x3)
return gradStore
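# Editor's note: the loops above implement a second-order central difference,
#   df/dx_k ~= (f[..., i+1] - f[..., i-1]) / (2*dx),
# evaluated away from the array edges.  A hedged NumPy one-liner giving the
# same stencil along one axis (a sketch, not used by the code below) would be:
#   partial = (data[2:] - data[:-2]) / (2*dx)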
def mapping(data, page): #kind of like meshgrid(X,Y) for separating the partials.
if len(data.shape) == 2:
x2_component = zeros_like(data)
x3_component = zeros_like(data)
gradf = gradient(data)
for i in range(data_shape[1]-2):
for j in range(data_shape[0]-2):
x2_component[i][j] = gradf[i][j][0]
x3_component[i][j] = gradf[i][j][1]
if (abs(x3_component[i][j])>1000 or abs(x2_component[i][j])>1000):
x2_component[i][j]=0
x3_component[i][j]=0
return x2_component, x3_component
else:
x2_component = zeros_like(data[page])
x3_component = zeros_like(data[page])
gradf = gradient(data)
for i in range(data_shape[1]-2):
for j in range(data_shape[0]-2):
x2_component[i][j] = gradf[page][i][j][0]
x3_component[i][j] = gradf[page][i][j][1]
return x2_component, x3_component
t0 = 0
x0 = 0
Q = quiver(axis[0],axis[1],mapping(data_natp_set[0],x0)[0],mapping(data_natp_set[0],x0)[1])
#this is the time slider
t0_ax = axes([0.25, 0, 0.5, 0.03], axisbg='slategray')
t0_slider = Slider(t0_ax, 'time step', 0, t_steps, valinit = 0, valfmt='%0.0f')
def update_time(val):
global t0
t0 = round(t0_slider.val)
Q.set_UVC(mapping(data_natp_set[t0],x0)[0],mapping(data_natp_set[t0],x0)[1])
t0_slider.on_changed(update_time)
#this would be used for going through 3d data along the x axis
depth_ax = axes([0.25, 0.03, 0.5, 0.03], axisbg='slategray')
depth_slider = Slider(depth_ax, 'depth', 0, data_shape[0], valinit = 0, valfmt='%0.1f')
def update_depth(val):
global x0
    x0 = int(round(depth_slider.val))
    # vector_plot() was never defined; refresh the existing quiver with the new
    # depth instead, mirroring update_time above.
    Q.set_UVC(mapping(data_natp_set[t0], x0)[0], mapping(data_natp_set[t0], x0)[1])
depth_slider.on_changed(update_depth)
show()
|
mit
|
gviejo/ThalamusPhysio
|
python/figure_article_v3/main_article_v3_fig_4.py
|
1
|
28416
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
import sys
sys.path.append("../")
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import matplotlib.cm as cm
import os
from scipy.ndimage import gaussian_filter
###############################################################################################################
# TO LOAD
###############################################################################################################
data = cPickle.load(open('../../figures/figures_articles_v2/figure3/dict_fig3_article.pickle', 'rb'))
allzth = data['swr_modth' ]
eigen = data['eigen' ]
times = data['times' ]
allthetamodth = data['theta_modth' ]
phi = data['phi' ]
zpca = data['zpca' ]
phi2 = data['phi2' ]
jX = data['rX' ]
jscore = data['jscore' ]
force = data['force' ] # theta modulation
variance = data['variance' ] # ripple modulation
# sort allzth
index = allzth[0].sort_values().index.values
index = index[::-1]
allzthsorted = allzth.loc[index]
phi = phi.loc[index]
phi2 = phi2.loc[index]
allthetamodth = allthetamodth.loc[index]
theta2 = pd.read_hdf("/mnt/DataGuillaume/MergedData/THETA_THAL_mod_2.h5")
theta2 = theta2['rem']
# REPlACING WITH VERSION 2 OF THETA MOD HERE
allthetamodth = theta2.loc[allthetamodth.index]
allthetamodth.rename({'pval':'pvalue'}, inplace=True)
allthetamodth['phase'] += 2*np.pi
allthetamodth['phase'] %= 2*np.pi
spikes_theta_phase = cPickle.load(open('/mnt/DataGuillaume/MergedData/SPIKE_THETA_PHASE.pickle', 'rb'))
###############################################################################################################
# PLOT11
###############################################################################################################
def figsize(scale):
fig_width_pt = 483.69687 # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)
fig_width = fig_width_pt*inches_per_pt*scale # width in inches
fig_height = fig_width*golden_mean*1.5 # height in inches
fig_size = [fig_width,fig_height]
return fig_size
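# Hedged worked example (editor's addition): with the hard-coded text width
# above, figsize(1) evaluates to roughly [6.69, 6.20] inches
# (483.69687 pt / 72.27 pt-per-inch ~= 6.69 in; 6.69 * 0.618 * 1.5 ~= 6.20 in).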
def simpleaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
def noaxis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_xticks([])
ax.set_yticks([])
# ax.xaxis.set_tick_params(size=6)
# ax.yaxis.set_tick_params(size=6)
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
mpl.use("pdf")
pdf_with_latex = { # setup matplotlib to use latex for output
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
# "text.usetex": True, # use LaTeX to write all text
# "font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
"axes.labelsize": 8, # LaTeX default is 10pt font.
"font.size": 7,
"legend.fontsize": 7, # Make the legend/label fonts a little smaller
"xtick.labelsize": 7,
"ytick.labelsize": 7,
"pgf.preamble": [
r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :)
r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
],
"lines.markeredgewidth" : 0.2,
"axes.linewidth" : 0.8,
"ytick.major.size" : 1.5,
"xtick.major.size" : 1.5
}
mpl.rcParams.update(pdf_with_latex)
import matplotlib.gridspec as gridspec
from matplotlib.pyplot import *
from mpl_toolkits.axes_grid.inset_locator import inset_axes
fig = figure(figsize = figsize(1), tight_layout = True)
# outer = gridspec.GridSpec(3,3, wspace = 0.4, hspace = 0.5)#, height_ratios = [1,3])#, width_ratios = [1.6,0.7])
gs = gridspec.GridSpec(5,3, wspace = 0.3, hspace = 0.6, left = 0.08, right = 0.98, top = 0.97, bottom = 0.05, height_ratios = [0.25,0.2,0.3,-0.12,0.6])
########################################################################
# A. SHarp waves ripples modulation
########################################################################
subplot(gs[0,0])
im = imshow(allzthsorted, aspect = 'auto', cmap = 'viridis')
xticks(np.arange(20,200,40), (times[np.arange(20,200,40)]).astype('int'))
yticks([0,700], ['1', '700'])
# cb = colorbar()
# cb.set_label("z", labelpad = -10, y = 1.08, rotation = 0)
ylabel("Thalamic\nneurons", labelpad = -9.0)
xlabel("Time from SWRs (ms)", labelpad = -0.05)
title("SWR modulation", fontsize = 8, y = 0.95)
text(-0.3, 1.03, "a", transform = gca().transAxes, fontsize = 10, fontweight='bold')
cax = inset_axes(gca(), "4%", "100%",
bbox_to_anchor=(1, -0.06, 1, 1),
bbox_transform=gca().transAxes,
loc = 'lower left')
cax.set_title("z", fontsize = 7, pad = 2.5)
cb = colorbar(im, cax = cax, orientation = 'vertical', ticks = [-2, 0, 2])
########################################################################
# B. JPCA
########################################################################
ax = subplot(gs[1, 0])
simpleaxis(ax)
plot(times, jX[:,0], color = 'black', label = 'jPC 1')
plot(times, jX[:,1], color = 'grey', label = 'jPC 2')
legend(frameon=False,loc = 'lower left', bbox_to_anchor=(0.9,0.4),handlelength=1)
ylabel('jPC', labelpad = 0.1)
xlabel('Time from SWRs (ms)', labelpad = -0.05)
xticks([-400,-200,0,200,400])
# title('jPCA', fontsize = 8, y = 1)
# text(0.15, 0.86, "jPCA", transform = gca().transAxes, fontsize = 10)
text(-0.3, 1.05, "b", transform = gca().transAxes, fontsize = 10, fontweight='bold')
# ########################################################################
# # C. ORBIT
# ########################################################################
# ax = subplot(gs[0, 2])
# # axis('off')
# simpleaxis(ax)
# plot(jX[0,0], jX[0,1], 'o', markersize = 3, color = '#5c7d6f')
# plot(jX[:,0], jX[:,1], linewidth = 0.8, color = '#5c7d6f')
# arrow(jX[-10,0],jX[-10,1],jX[-1,0]-jX[-10,0],jX[-1,1]-jX[-10,1], color = '#5c7d6f', head_width = 0.01)
# # plot(jX[np.where(times==-250),0], jX[np.where(times==-250),1], 'o', color = '#5c7d6f', markersize = 2)
# # plot(jX[np.where(times== 250),0], jX[np.where(times== 250),1], 'o', color = '#5c7d6f', markersize = 2)
# # plot(jX[np.where(times== 0),0], jX[np.where(times== 0),1], 'o', color = '#5c7d6f', markersize = 2)
# annotate("-50 ms", xy = (jX[np.where(times==-50),0], jX[np.where(times==-50),1]), xytext = (jX[np.where(times==-50),0]-0.124, jX[np.where(times==-50),1]-0.015), fontsize = 6)
# annotate( "50 ms", xy = (jX[np.where(times== 50),0], jX[np.where(times== 50),1]), xytext = (jX[np.where(times== 50),0]+0.01, jX[np.where(times== 50),1]), fontsize = 6)
# annotate( "0 ms", xy = (jX[np.where(times== 0),0], jX[np.where(times== 0),1]), xytext = (jX[np.where(times== 0),0]-0.04, jX[np.where(times== 0),1]+0.02), fontsize = 6)
# ax.spines['left'].set_bounds(np.min(jX[:,1]), np.min(jX[:,1]+0.1))
# ax.spines['bottom'].set_bounds(np.min(jX[:,0]), np.min(jX[:,0]+0.1))
# xticks([], [])
# yticks([], [])
# ax.xaxis.set_label_coords(0.15, -0.02)
# ax.yaxis.set_label_coords(-0.02, 0.15)
# ylabel('jPC2')
# xlabel('jPC1')
# text(-0.1, 1.14, "C", transform = gca().transAxes, fontsize = 10)
# jpca = pd.DataFrame(index = times, data = jX)
# offs = 0.1
# # arrow(jpca.loc[50,0], jpca.loc[50,1], jpca.loc[55,0]-jpca.loc[50,0], jpca.loc[55,1]-jpca.loc[50,1], head_width=.020, fc = '#5c7d6f', shape='full', lw=0, length_includes_head=True)
# # arrow(jpca.loc[-5,0], jpca.loc[-5,1], jpca.loc[0,0]-jpca.loc[-5,0], jpca.loc[0,1]-jpca.loc[-5,1], head_width=.020, fc = '#5c7d6f', shape='full', lw=0, length_includes_head=True)
# # arrow(jpca.loc[-45,0], jpca.loc[-45,1], jpca.loc[-40,0]-jpca.loc[-45,0], jpca.loc[-40,1]-jpca.loc[-45,1], head_width=.020, fc = '#5c7d6f', shape='full', lw=0, length_includes_head=True)
# # arrow(jpca.loc[-115,0], jpca.loc[-115,1], jpca.loc[-110,0]-jpca.loc[-115,0], jpca.loc[-110,1]-jpca.loc[-115,1], head_width=.020, fc = '#5c7d6f', shape='full', lw=0, length_includes_head=True)
# for t in np.arange(-200,250,50):
# arrow(jpca.loc[t-5,0], jpca.loc[t-5,1], jpca.loc[t,0]-jpca.loc[t-5,0], jpca.loc[t,1]-jpca.loc[t-5,1], head_width=.020, fc = '#5c7d6f', shape='full', lw=0, length_includes_head=True)
########################################################################
# C circle
########################################################################
ax = subplot(gs[0:2,1:3])
ax.set_aspect("equal")
text(-0.5, 1.00, "c", transform = gca().transAxes, fontsize = 10, fontweight='bold')
axis('off')
axhline(0, xmin = 0.25, xmax = 0.75, color = 'black', linewidth = 1)
axvline(0, ymin = 0.25, ymax = 0.75, color = 'grey', linewidth = 1)
xlim(-20, 20)
# ylim(-14, 16)
ylim(-18,22)
phase_circle = np.arange(0, 2*np.pi, 0.0001)
# x, y = (np.cos(phi2.values.flatten()), np.sin(phi2.values.flatten()))
x, y = (np.cos(phase_circle),np.sin(phase_circle))
r = 14
plot(x*r, y*r, '-', color = 'black', linewidth = 0.5)
r = r+1
text(-r, 0,'$\pi$', horizontalalignment='center', verticalalignment='center', fontsize = 7)
text(r, 0,'0', horizontalalignment='center', verticalalignment='center', fontsize = 7)
text(0, r,'$\pi/2$', horizontalalignment='center', verticalalignment='center', fontsize = 7)
text(0, -r,'$3\pi/2$', horizontalalignment='center', verticalalignment='center', fontsize = 7)
text(r-7, -2.5, 'jPC1', fontsize = 8)
text(0.7, r-6, 'jPC2', fontsize = 8)
text(0.16,0.95,"Theta phase", fontsize =8,transform=ax.transAxes, color = 'red')
color_points = allthetamodth['phase'].copy()
color_points -= color_points.min()
color_points /= color_points.max()
# scatter(jscore.values[:,0], jscore.values[:,1], s = 3, c = color_points.values, cmap = cm.get_cmap('hsv'), zorder = 2, alpha = 1, linewidth = 0.0)
# scatter(jscore.values[:,0], jscore.values[:,1], s = 3, c = 'black', zorder = 2, alpha = 0.7, linewidth = 0.0)
scatter(jscore.values[:,0], jscore.values[:,1], s = 5, c = allzth.values[:,100], cmap = cm.get_cmap('viridis'), zorder = 2, alpha = 0.7, linewidth = 0.0)
bb = ax.get_position().bounds
aiw = 0.1
ail = 0.1
position_axes = [
[bb[0]+bb[2]*0.85,bb[1]+bb[3]*0.70],
[bb[0]+bb[2]*-0.2,bb[1]+bb[3]*0.70],
[bb[0]+bb[2]*-0.2,bb[1]+bb[3]*-0.1],
[bb[0]+bb[2]*0.85,bb[1]+bb[3]*-0.1]]
r -= 1
best_neurons = []
lbs = ['a', 'b', 'c', 'd']
for i,j in zip(np.arange(0, 2*np.pi, np.pi/2),np.arange(4)):
quarter = phi2[np.logical_and(phi2 > i, phi2 < i+(np.pi/2)).values]
tmp = jscore.loc[quarter.index.values]
if j == 2:
best_n = np.abs(allthetamodth.loc[tmp.index.values,'phase'] - (i+np.pi/8)).sort_values().index.values[9]
elif j == 0:
best_n = np.abs(allthetamodth.loc[tmp.index.values,'phase'] - (i+np.pi/8)).sort_values().index.values[4]
elif j == 3:
best_n = np.abs(allthetamodth.loc[tmp.index.values,'phase'] - (i+np.pi/8)).sort_values().index.values[1]
else:
best_n = np.abs(allthetamodth.loc[tmp.index.values,'phase'] - (i+np.pi/8)).astype('float').idxmin()
best_neurons.append(best_n)
ai = axes([position_axes[j][0],position_axes[j][1], aiw, ail], projection = 'polar')
ai.get_xaxis().tick_bottom()
ai.get_yaxis().tick_left()
ai.hist(spikes_theta_phase['rem'][best_n], 30, color = 'red', normed = True)
xticks(np.arange(0, 2*np.pi, np.pi/4), ['0', '', '$\pi/2$', '', '$\pi$', '', '$3\pi/2$',''])
yticks([])
grid(linestyle = '--')
# xlabel(lbs[j])
# if j == 1:
# # ai.set_title("Theta phase", fontsize = 8, color = 'red')
# ai.text(1,1,"Theta phase", fontsize =8, color = 'red')
ai.yaxis.grid(False)
ai.tick_params(axis='x', pad = -5)
# ai.set_ylim(0,0.5)
ai.arrow(x = allthetamodth.loc[best_n,'phase'], y = 0, dx = 0, dy = ai.get_ylim()[1]*0.6,
edgecolor = 'black', facecolor = 'green', lw = 1.0, head_width = 0.1, head_length = 0.02,zorder = 5)
x = np.cos(quarter.loc[best_n,0])*r
y = np.sin(quarter.loc[best_n,0])*r
xx, yy = (jscore.loc[best_n,0],jscore.loc[best_n,1])
ax.scatter(x, y, s = 20, c = 'red', cmap = cm.get_cmap('viridis'), alpha = 1)
ax.scatter(xx, yy, s = 6, c = 'red', cmap = cm.get_cmap('viridis'), zorder = 2)
ax.arrow(xx, yy, x - xx, y - yy,
head_width = 0.8,
linewidth = 0.6,
length_includes_head = True,
color = 'grey'
)
text(0.77,0.72, "a", fontsize =9,transform=ax.transAxes)
text(0.14,0.71,"b", fontsize =9,transform=ax.transAxes)
text(0.15,0.16,"c", fontsize =9,transform=ax.transAxes)
text(0.73,0.1,"d", fontsize =9,transform=ax.transAxes)
# text(0, 0, '$\mathbf{SWR\ jPCA\ phase}$',horizontalalignment='center')
text(-0.2, 0.45, 'SWR jPCA phase', fontsize = 8, horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
# lower_left = np.argmin(jscore.values[:,0])
# text(-35.,-7, 'arctan2', rotation = 13.0)
# cbaxes = fig.add_axes([0.25, 0.45, 0.01, 0.04])
# cmap = cm.viridis
# norm = matplotlib.colors.Normalize(allzth.values[:,100].min(), allzth.values[:,100].max())
# cb = matplotlib.colorbar.ColorbarBase(cbaxes, cmap = cmap, norm = norm)
# cbaxes.axes.set_xlabel('SWR \n modulation')
#########################################################################
# D PHASE PHASE SCATTER
#########################################################################
# gs = gridspec.GridSpecFromSubplotSpec(1,1, subplot_spec = outer[4])
ax = subplot(gs[2,0])
simpleaxis(ax)
ax.set_aspect("equal")
# dist_cp = np.sqrt(np.sum(np.power(eigen[0] - eigen[1], 2))
theta_mod_toplot = allthetamodth.values[:,0].astype('float32')#,dist_cp>0.02]
phi_toplot = phi2.values.flatten()
r, p = corr_circular_(theta_mod_toplot, phi_toplot)
print(r, p)
x = np.concatenate([theta_mod_toplot, theta_mod_toplot, theta_mod_toplot+2*np.pi, theta_mod_toplot+2*np.pi])
y = np.concatenate([phi_toplot, phi_toplot + 2*np.pi, phi_toplot, phi_toplot + 2*np.pi])
# scatter(x, y, s = 2, c = np.tile(allzth.values[:,100],4), cmap = cm.get_cmap('viridis'), zorder = 2, alpha = 0.5)
# # scatter(x, y, s = 0.8, c = np.tile(color_points,4), cmap = cm.get_cmap('hsv'), zorder = 2, alpha = )
# xticks([0, np.pi, 2*np.pi, 3*np.pi, 4*np.pi], ('0', '$\pi$', '$2\pi$', '$3\pi$', '$4\pi$'))
# yticks([0, np.pi, 2*np.pi, 3*np.pi, 4*np.pi], ('0', '$\pi$', '$2\pi$', '$3\pi$', '$4\pi$'))
# xlabel('Theta phase (rad.)', labelpad = 1.2)
# ylabel('SWR jPCA phase (rad.)')
gca().text(0.15, 0.9, r'$r = 0.18$', transform = gca().transAxes, fontsize = 8, color = 'white')
gca().text(0.15, 0.78, r'$ p = 2.3 \times 10^{-7}$', transform = gca().transAxes, fontsize = 8, color = 'white')
gca().text(-0.9, 1.05, "d", transform = gca().transAxes, fontsize = 10, fontweight='bold')
# gs = gridspec.GridSpecFromSubplotSpec(1,1, subplot_spec = outer[5])
# ax = subplot(gs[2,2])
# text(-0.1, 1.1, "F", transform = gca().transAxes, fontsize = 10)
H, xedges, yedges = np.histogram2d(y, x, 50)
H = gaussFilt(H, (3,3))
H = H - H.min()
H = H / H.max()
print(np.sum(np.isnan(H)))
# imshow(H, origin = 'lower', interpolation = 'nearest', aspect = 'auto')
# levels = np.linspace(H.min(), H.max(), 50)
axp = ax.contourf(H, cmap = 'Greys', extent = (xedges[0], xedges[-2], yedges[0], yedges[-2]))
# xticks([0, np.pi, 2*np.pi, 3*np.pi, 4*np.pi], ('0', '$\pi$', '$2\pi$', '$3\pi$', '$4\pi$'))
# yticks([0, np.pi, 2*np.pi, 3*np.pi, 4*np.pi], ('0', '$\pi$', '$2\pi$', '$3\pi$', '$4\pi$'))
xlabel('Theta phase (rad.)', labelpad = 0.1)
ylabel('SWR jPCA \nphase (rad.)', labelpad = 4)
tik = np.array([0, np.pi, 2*np.pi, 3*np.pi])
# xtik = [np.argmin(np.abs(i-xedges)) for i in tik]
# ytik = [np.argmin(np.abs(i-yedges)) for i in tik]
xticks(tik, ('0', '$\pi$', '$2\pi$', '$3\pi$'))
yticks(tik, ('0', '$\pi$', '$2\pi$', '$3\pi$'))
title("Density", fontsize = 8, y = 0.94)
scatter(allthetamodth.loc[best_neurons, 'phase'].values, phi2.loc[best_neurons].values.flatten(), color = 'red', s = 6, zorder = 5)
for i in range(4):
xy = (allthetamodth.loc[best_neurons, 'phase'].values[i], phi2.loc[best_neurons].values.flatten()[i])
annotate(lbs[i], xy, (xy[0]+0.1, xy[1]+0.2), color = 'white')
# cbaxes = fig.add_axes([0.4, 0.4, 0.04, 0.01])
# cb = colorbar(axp, cax = cbaxes, orientation = 'horizontal', ticks = [0, 1])
# # cbaxes.yaxis.set_ticks_position('left')
#colorbar
cax = inset_axes(gca(), "4%", "20%",
bbox_to_anchor=(1.0, 0.0, 1, 1),
bbox_transform=gca().transAxes,
loc = 'lower left')
cb = colorbar(axp, cax = cax, orientation = 'vertical', ticks = [0.25, 0.75])
###############################################################################################################
# TO LOAD
###############################################################################################################
def softmax(x, b1 = 10.0, b2 = 0.5, lb = 0.2):
x -= x.min()
x /= x.max()
return (1.0/(1.0+np.exp(-(x-b2)*b1)) + lb)/(1.0+lb)
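# Hedged note (editor's addition): despite its name, softmax() here is a
# logistic squashing, not the multi-class softmax.  x is first rescaled to
# [0, 1], then passed through a sigmoid of gain b1 centred at b2, with a floor
# lb; e.g. with the defaults (b1=10, b2=0.5, lb=0.2) the rescaled minimum maps
# to roughly (sigmoid(-5) + 0.2) / 1.2 ~= 0.17 and the maximum to ~= 0.99.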
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
swr_mod = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (20,)).transpose())
swr_mod = swr_mod.drop(swr_mod.columns[swr_mod.isnull().any()].values, axis = 1)
swr_mod = swr_mod.loc[-500:500]
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
nbins = 200
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
times2 = swr_mod.index.values
# carte38_mouse17_2 = imread('../../figures/mapping_to_align/paxino/paxino_38_mouse17_2.png')
# bound_map_38 = (-2336/1044, 2480/1044, 0, 2663/1044)
# cut_bound_map = (-86/1044, 2480/1044, 0, 2663/1044)
carte_adrien = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_ALL-01.png')
carte_adrien2 = imread('/home/guillaume/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/HPC-Thal/Figures/ATAnatomy_Contour-01.png')
bound_adrien = (-398/1254, 3319/1254, -(239/1254 - 20/1044), 3278/1254)
shifts = np.array([ [-0.34, 0.56],
[0.12, 0.6],
[-0.35, 0.75],
[-0.3, 0.5]
])
angles = np.array([15.0, 10.0, 15.0, 20.0])
nucleus = ['AD', 'AM', 'AVd', 'AVv', 'IAD', 'MD', 'PV', 'sm']
swr_nuc = pd.DataFrame(index = swr_mod.index, columns = pd.MultiIndex.from_product([['Mouse12', 'Mouse17', 'Mouse20', 'Mouse32'],nucleus,['mean', 'sem']]))
neurons = np.intersect1d(swr_mod.columns.values, mappings.index.values)
for m in ['Mouse12', 'Mouse17', 'Mouse20', 'Mouse32']:
subspace = mappings.loc[neurons][mappings.loc[neurons].index.str.contains(m)]
groups = subspace.groupby(['nucleus']).groups
for n in nucleus:
if len(groups[n])>3:
swr_nuc[(m,n)] = pd.concat([swr_mod[groups[n]].mean(1),swr_mod[groups[n]].sem(1)], axis = 1).rename(columns={0:'mean',1:'sem'})
swr_all = pd.DataFrame(index = swr_mod.index, columns = nucleus)
mappings = mappings.loc[neurons]
for n in nucleus:
swr_all[n] = swr_mod[mappings[mappings['nucleus'] == n].index.values].mean(1)
xs = [-0.4,0.35]
ys = [-0.3,0.25]
lbs = ['A', 'B', 'C', 'D']
colors = ['blue', 'red', 'green', '#ff934f']
toplot = pd.DataFrame(index = ['Mouse12','Mouse17','Mouse20','Mouse32'], columns = pd.MultiIndex.from_product([range(3),['start','end']]))
for i,j,k in zip(range(3),[-80,120,250],[0,200,330]):
toplot.loc['Mouse17',(i,'start')] = j
toplot.loc['Mouse17',(i,'end')] = k
alljpc = dict()
pos = [1,0,2,3]
i = 0
m = 'Mouse17'
data = cPickle.load(open("../../data/maps/"+m+".pickle", 'rb'))
theta = data['movies']['theta']
swr = data['movies']['swr']
total = data['total']
x = data['x']
y = data['y']
headdir = data['headdir']
jpc = data['jpc']
jpc = pd.DataFrame(index = times2, data = jpc)
toplot = pd.DataFrame(index = ['Mouse12','Mouse17','Mouse20','Mouse32'], columns = pd.MultiIndex.from_product([range(3),['start','end']]))
for i,j,k in zip(range(3),[-80,120,250],[0,200,330]):
toplot.loc['Mouse17',(i,'start')] = j
toplot.loc['Mouse17',(i,'end')] = k
#####################################################################
# E MAPS MOUSE 17
#####################################################################
gsm = gridspec.GridSpecFromSubplotSpec(1,3, subplot_spec = gs[2,1:])
bound = cPickle.load(open("../../figures/figures_articles/figure1/rotated_images_"+m+".pickle", 'rb'))['bound']
newswr = []
for j in range(3):
tmp = swr[:,:,np.where(times2 == toplot.loc[m,(j,'start')])[0][0]:np.where(times2 == toplot.loc[m,(j,'end')])[0][0]].mean(-1)
xnew, ynew, frame = interpolate(tmp.copy(), x, y, 0.01)
frame = gaussian_filter(frame, (10, 10))
newswr.append(frame)
newswr = np.array(newswr)
newswr = newswr - newswr.min()
newswr = newswr / newswr.max()
newswr = softmax(newswr, 10, 0.5, 0.0)
for j in range(3):
subplot(gsm[0,j])
if j == 0:
text(-0.1, 1.05, "e", transform = gca().transAxes, fontsize = 10, fontweight='bold')
if j == 1:
title("SWR modulation (Mouse 1)", pad = -0.05)
noaxis(gca())
image = newswr[j]
h, w = image.shape
rotated_image = np.zeros((h*3, w*3))*np.nan
rotated_image[h:h*2,w:w*2] = image.copy() + 1.0
rotated_image = rotateImage(rotated_image, -angles[pos[i]])
rotated_image[rotated_image == 0.0] = np.nan
rotated_image -= 1.0
tocrop = np.where(~np.isnan(rotated_image))
rotated_image = rotated_image[tocrop[0].min()-1:tocrop[0].max()+1,tocrop[1].min()-1:tocrop[1].max()+1]
imshow(carte_adrien2, extent = bound_adrien, interpolation = 'bilinear', aspect = 'equal')
im = imshow(rotated_image, extent = bound, alpha = 0.8, aspect = 'equal', cmap = 'bwr')
xlim(np.minimum(bound_adrien[0],bound[0]),np.maximum(bound_adrien[1],bound[1]))
ylim(np.minimum(bound_adrien[2],bound[2]),np.maximum(bound_adrien[3],bound[3]))
xlabel(str(toplot.loc[m,(j,'start')])+r"ms $\rightarrow$ "+str(toplot.loc[m,(j,'end')])+"ms")
#colorbar
cax = inset_axes(gca(), "4%", "20%",
bbox_to_anchor=(0.75, 0.0, 1, 1),
bbox_transform=gca().transAxes,
loc = 'lower left')
cb = colorbar(im, cax = cax, orientation = 'vertical', ticks = [0.25, 0.75])
# cb.set_label('Burstiness', labelpad = -4)
# cb.ax.xaxis.set_tick_params(pad = 1)
# cax.set_title("Cluster 2", fontsize = 6, pad = 2.5)
#####################################################################
# F SWR NUCLEUS
#####################################################################
gsm = gridspec.GridSpecFromSubplotSpec(len(nucleus)+1,2, subplot_spec = gs[4,:], height_ratios = [1]+[0.1]*len(nucleus))
idm = swr_all.idxmax()
idm = idm.sort_values()
order = idm.index.values
subplot(gsm[0,0])
simpleaxis(gca())
for n in nucleus:
plot(swr_all[n], label = n)
axvline(0, linestyle = '--', linewidth = 1, alpha = 0.5, color = 'black')
xticks([], [])
text(-0.17, 1.1, "f", transform = gca().transAxes, fontsize = 10, fontweight='bold')
ylabel("SWRs mod.", labelpad = 2, y = 0.6)
xlim(-500,500)
legend(frameon=False,loc = 'lower left', bbox_to_anchor=(0.6,0.38),handlelength=1,ncol = 2)
######################################################################
for i, n in enumerate(order):
subplot(gsm[i+1,0])
tmp = swr_nuc.xs(n,1,1).xs('mean',1,1).T.values.astype(np.float32)
imshow(tmp, aspect = 'auto', cmap = 'bwr')
if i == len(nucleus)-1:
xticks([0,100,200],[-500,0,500])
xlabel("Time lag (ms)", labelpad = 0.9)
else:
xticks([], [])
# if i == 0:
# # yticks([0,3], [1,4])
# ylabel(n, rotation = 0, labelpad = 8, y = 0.2)
# else:
yticks([0,3], ['',''])
ylabel(n, rotation = 0, labelpad = 11, y = 0.2)
# if i == len(order)-1:
if i == 0:
annotate("Mouse", (1.08, 2.4), (1.04, 2.7 ), xycoords = 'axes fraction', fontsize = 7)
annotate("1", (1, 0.75+0.125), (1.08, 1.6 ), xycoords = 'axes fraction', fontsize = 7, arrowprops = {'arrowstyle':'-'})
annotate("2", (1, 0.50+0.125), (1.08, 0.8), xycoords = 'axes fraction', fontsize = 7, arrowprops = {'arrowstyle':'-'})
annotate("3", (1, 0.25+0.125), (1.08, 0.0), xycoords = 'axes fraction', fontsize = 7, arrowprops = {'arrowstyle':'-'})
annotate("4", (1, 0.0 +0.125), (1.08, -0.8 ), xycoords = 'axes fraction', fontsize = 7, arrowprops = {'arrowstyle':'-'})
#####################################################################
# G ORBIT
#####################################################################
subplot(gsm[:,1])
simpleaxis(gca())
gca().set_aspect("equal")
data = cPickle.load(open('../../figures/figures_articles/figure3/dict_fig3_article.pickle', 'rb'))
allzth = data['swr_modth' ]
eigen = data['eigen' ]
times = data['times' ]
allthetamodth = data['theta_modth' ]
phi = data['phi' ]
zpca = data['zpca' ]
phi2 = data['phi2' ]
jX = data['rX' ]
jscore = data['jscore' ]
force = data['force' ] # theta modulation
variance = data['variance' ] # ripple modulation
plot(jX[0,0], jX[0,1], 'o', markersize = 3, color = 'black')
plot(jX[:,0], jX[:,1], linewidth = 1, color = 'black')
arrow(jX[-10,0],jX[-10,1],jX[-1,0]-jX[-10,0],jX[-1,1]-jX[-10,1], color = 'black', head_width = 0.01)
gca().spines['left'].set_bounds(np.min(jX[:,1]), np.min(jX[:,1]+0.1))
gca().spines['bottom'].set_bounds(np.min(jX[:,0]), np.min(jX[:,0]+0.1))
# gca().spines['left'].set_visible(False)
# gca().spines['bottom'].set_visible(False)
xticks([], [])
yticks([], [])
gca().xaxis.set_label_coords(0.15, -0.02)
gca().yaxis.set_label_coords(-0.02, 0.15)
ylabel('jPC2')
xlabel('jPC1')
text(-0.1, 1.02, "g", transform = gca().transAxes, fontsize = 10, fontweight='bold')
jpca = pd.DataFrame(index = times2, data = jX)
fontsize = np.array([swr_all.loc[idm[n],n]*100.0 for n in order])
fontsize -= np.min(fontsize)
fontsize /= np.max(fontsize)
fontsize = 1/(1+np.exp(-8*(fontsize-0.5)))
fontsize = fontsize*6 + 7.0
fontsize = pd.Series(index = order, data = fontsize)
for n in order :
if n == 'PV':
text(jpca.loc[idm[n],0]-0.04,jpca.loc[idm[n],1]+0.03,n,
ha = 'center',
va = 'center',
bbox = dict(boxstyle='square,pad=0.1',fc='white',ec='none'),
fontsize = fontsize[n])
else:
text(jpca.loc[idm[n],0],jpca.loc[idm[n],1],n,
ha = 'center',
va = 'center',
bbox = dict(boxstyle='square,pad=0.1',fc='white',ec='none'),
fontsize = fontsize[n])
tx = [-100,0]
# plot(jpca.loc[tx,0], jpca.loc[tx,1], '.', color = 'black')
annotate('0 ms', (jpca.loc[0,0],jpca.loc[0,1]), (jpca.loc[0,0]+0.009,jpca.loc[0,1]-0.02))
# annotate('-100 ms', (jpca.loc[-100,0],jpca.loc[-100,1]), (jpca.loc[-100,0]-0.0,jpca.loc[-100,1]-0.03))
annotate('50 ms', (jpca.loc[50,0],jpca.loc[50,1]), (jpca.loc[50,0]-0.0,jpca.loc[50,1]+0.01))
offs = 0.1
# arrow(jpca.loc[50,0], jpca.loc[50,1], jpca.loc[55,0]-jpca.loc[50,0], jpca.loc[55,1]-jpca.loc[50,1], head_width=.015, fc = 'black', shape='full', lw=0, length_includes_head=True)
# arrow(jpca.loc[0,0], jpca.loc[0,1], jpca.loc[5,0]-jpca.loc[0,0], jpca.loc[5,1]-jpca.loc[0,1], head_width=.015, fc = 'black', shape='full', lw=0, length_includes_head=True)
# arrow(jpca.loc[-45,0], jpca.loc[-45,1], jpca.loc[-40,0]-jpca.loc[-45,0], jpca.loc[-40,1]-jpca.loc[-45,1], head_width=.015, fc = 'black', shape='full', lw=0, length_includes_head=True)
# arrow(jpca.loc[-115,0], jpca.loc[-115,1], jpca.loc[-110,0]-jpca.loc[-115,0], jpca.loc[-110,1]-jpca.loc[-115,1], head_width=.015, fc = 'black', shape='full', lw=0, length_includes_head=True)
for t in np.arange(-200,250,50):
arrow(jpca.loc[t-5,0], jpca.loc[t-5,1], jpca.loc[t,0]-jpca.loc[t-5,0], jpca.loc[t,1]-jpca.loc[t-5,1], head_width=.018, fc = 'black', shape='full', lw=0, length_includes_head=True)
savefig("../../figures/figures_articles_v3/figart_4.pdf", dpi = 900, facecolor = 'white')
os.system("evince ../../figures/figures_articles_v3/figart_4.pdf &")
|
gpl-3.0
|
bowenliu16/deepchem
|
examples/sider/sider_rf.py
|
6
|
1168
|
"""
Script that trains Sklearn multitask models on the sider dataset
@Author Bharath Ramsundar, Aneesh Pappu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import shutil
import numpy as np
import deepchem as dc
from sider_datasets import load_sider
from sklearn.ensemble import RandomForestClassifier
sider_tasks, datasets, transformers = load_sider()
train_dataset, valid_dataset, test_dataset = datasets
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean,
mode="classification")
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=100)
return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(sider_tasks, model_builder)
# Fit trained model
model.fit(train_dataset)
model.save()
print("About to evaluate model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
|
gpl-3.0
|
glgerard/cloud-mdbn
|
src/params_check.py
|
2
|
3042
|
"""
Copyright (c) 2016 Gianluca Gerard
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
import numpy as np
import MDBN
# Initialize background to dark gray
def display_weights(W, nRows=5, nCols=8, dimX = 20, dimY = 40 ):
X = np.vstack((W,np.zeros((dimX*dimY-W.shape[0],W.shape[1]))))
tiled = np.ones(((dimY+1)*nRows, (dimX+1)*nCols), dtype='uint8') * 51
for row in xrange(nRows):
for col in xrange(nCols):
patch = X[:,row*nCols + col].reshape((dimY,dimX))
normPatch = ((patch - patch.min()) /
(patch.max()-patch.min()+1e-6))
tiled[row*(dimY+1):row*(dimY+1)+dimY, col*(dimX+1):col*(dimX+1)+dimX] = \
normPatch * 255
plt.imshow(tiled)
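# Hedged usage note (editor's addition): display_weights expects W with one
# column per hidden unit and at most dimX*dimY rows (shorter columns are
# zero-padded); with the defaults it tiles the first nRows*nCols = 40 hidden-
# unit filters into a 5 x 8 grid of dimY x dimX (40 x 20) patches, e.g.
#   display_weights(dbn.rbm_layers[0].W.get_value(borrow=True))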
def display_sample(X, dimX=20, dimY=40, cmap='gray'):
y = np.zeros(dimX * dimY)
y[:X.shape[0] - dimX*dimY] = X
plt.imshow(y.reshape(dimX,dimY),cmap=cmap)
def plotit(values):
plt.hist(values);
plt.title('mm = %g' % np.mean(np.fabs(values)))
def run(datafile, training_fn):
dbn, _, _ = training_fn(datafile,graph_output=True, layers_sizes=[290, 40],
pretrain_lr=[0.01, 0.01], pretraining_epochs=[8000, 800])
hbias = dbn.rbm_layers[0].hbias.get_value(borrow=True)
vbias = dbn.rbm_layers[0].vbias.get_value(borrow=True)
W = dbn.rbm_layers[0].W.get_value(borrow=True)
return hbias, vbias, W
"""
Utils to support visual tuning of the learning parameters
See http://yosinski.com/media/papers/Yosinski2012VisuallyDebuggingRestrictedBoltzmannMachine.pdf
"Visually Debugging Restricted Boltzmann Machine Training
with a 3D Example" Yosinski 2010
"""
if __name__ == '__main__':
datafiles = MDBN.prepare_TCGA_datafiles()
hbias, vbias, W = run(datafiles['GE'],MDBN.train_GE)
plt.close(1)
plt.close(2)
plt.figure(3)
plt.subplot(221); plotit(W)
plt.subplot(222); plotit(hbias)
plt.subplot(223); plotit(vbias)
plt.figure(4)
display_weights(W, nRows = 20, nCols = 14, dimX = 126, dimY = 128 ) # GE
|
apache-2.0
|
jm-begon/scikit-learn
|
sklearn/neighbors/regression.py
|
106
|
10572
|
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
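# A minimal illustrative sketch of the `weights` parameter accepting a
# user-defined callable, assuming scikit-learn is installed. The helper below
# and its Gaussian kernel width `sigma` are arbitrary names chosen for
# documentation only; the function is never called elsewhere.
def _example_custom_weight_regression():
    """Sketch: k-NN regression with a custom distance-based weight function."""
    import numpy as np
    from sklearn.neighbors import KNeighborsRegressor

    def gaussian_weights(distances, sigma=0.5):
        # Closer neighbors receive exponentially larger weights; the returned
        # array has the same shape as the input distances.
        return np.exp(-(distances ** 2) / (2.0 * sigma ** 2))

    X = [[0.0], [1.0], [2.0], [3.0]]
    y = [0.0, 0.0, 1.0, 1.0]
    model = KNeighborsRegressor(n_neighbors=2, weights=gaussian_weights)
    model.fit(X, y)
    return model.predict([[1.5]])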
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
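# A minimal, hedged usage sketch of radius-based regression, assuming
# scikit-learn is installed; the radius value below is arbitrary. Queries with
# no training point inside the radius have an empty neighborhood, so the mean
# over zero neighbors is undefined for them. The helper is illustrative only
# and is never called elsewhere.
def _example_radius_regression():
    """Sketch: regression averaging all neighbors within a fixed radius."""
    from sklearn.neighbors import RadiusNeighborsRegressor

    X = [[0.0], [1.0], [2.0], [3.0]]
    y = [0.0, 0.0, 1.0, 1.0]
    model = RadiusNeighborsRegressor(radius=1.5, weights='distance')
    model.fit(X, y)
    return model.predict([[1.5]])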
|
bsd-3-clause
|
fzalkow/scikit-learn
|
examples/classification/plot_lda_qda.py
|
164
|
4806
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
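# In dataset_fixed_cov() both classes share the same mixing matrix C, so their
# population covariance is identical (C^T C) and only the class mean differs.
# In dataset_cov() the second class is generated with C.T instead of C, which
# gives the two classes different covariance matrices (C^T C vs. C C^T).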
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # correctly classified samples
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
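# plot_ellipse() visualizes a Gaussian's covariance: eigh(cov) yields the
# eigenvalues v (variances along the principal axes) and eigenvectors w, the
# ellipse axes are drawn with lengths 2 * sqrt(v), and the rotation angle is
# taken from the leading eigenvector (converted from radians to degrees).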
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
|
bsd-3-clause
|
Intel-Corporation/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
|
6
|
54502
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features],
0), array_ops.concat([labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
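# The two model functions above illustrate the two return conventions accepted
# by the tf.contrib.learn Estimator in this test file: a plain
# (predictions, loss, train_op) tuple, or an explicit model_fn.ModelFnOps
# carrying the same three pieces together with the mode.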
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.cast(features['feature'], dtypes.int64))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = training_util.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(expected_features, expected_labels, actual_features,
actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {k: constant_op.constant(v)
for k, v in six.iteritems(features)}, constant_op.constant(labels)
return _input_fn
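# _make_input_fn builds the standard contrib.learn input_fn contract: a
# zero-argument callable returning (features, labels), where features is a
# dict of tensors keyed by feature name and labels is a tensor. For example,
# _make_input_fn({'x': 42., 'y': 43.}, 44.)() returns
# ({'x': <tf.Tensor>, 'y': <tf.Tensor>}, <tf.Tensor>) when called inside a graph.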
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn,
params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features,
labels,
mode,
params,
config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
training_util.get_global_step().assign_add(1))
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(
dtype=dtypes.string, shape=[None], name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(features, labels, {
'examples': serialized_tf_example
})
est.export_savedmodel(
os.path.join(est.model_dir, 'export'), serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError, 'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(
model_fn=linear_model_fn, config=config, model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(
model_fn=linear_model_fn, model_dir='test_dir', config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={
'MSE': metric_ops.streaming_mean_squared_error
})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir, model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
# TODO(b/78461127): Please modify tests to not directly rely on names of
# checkpoints.
self.assertAllEqual(['model.ckpt-0', 'model.ckpt-5'],
ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1, model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2, model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={
'learning_rate': 0.01
}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
# We pass an empty list and expect it to remain empty after calling
# fit and evaluate. This requires the estimator to copy the list internally
# if any hooks were added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(
input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
'MSE': _streaming_mean_squared_error_histogram
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testSummaryWritingWithTensor(self):
def _streaming_prediction_mean_tensor(predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
return metric_ops.streaming_mean_tensor(
predictions,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={
'PMT': _streaming_prediction_mean_tensor
})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('PMT' in output_values)
self.assertTrue(output_values['PMT'].HasField('tensor'))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(['bogus_lookup', 'feature'], [
compat.as_str_any(x)
for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)
])
# cleanup
gfile.DeleteRecursively(tmpdir)
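  # The assertions above document the on-disk layout produced by
  # export_savedmodel(): saved_model.pb, a variables/ directory with the
  # checkpoint data and index files, an assets/ directory for files tracked in
  # the ASSET_FILEPATHS collection (the vocab file), and assets.extra/ for the
  # user-supplied assets_extra mapping.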
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base,
serving_input_fn,
assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])
])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertIn('input_example_tensor', graph_ops)
self.assertIn('ParseExample/ParseExample', graph_ops)
self.assertIn('linear/linear/feature/matmul', graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertIn('save/SaveV2/tensor_names', graph_ops)
self.assertIn('save_1/SaveV2/tensor_names', graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertIn('hash_table_Lookup/LookupTableFindV2', graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None)
)
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table.resource_handle.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table.resource_handle.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ', {
'TF_CONFIG': json.dumps(tf_config)
}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
nicaogr/Style-Transfer
|
Style_Transfer.py
|
1
|
87672
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 14 15:32:49 2017
The goal of this script is to implement the Style Transfer algorithm
Inspired from https://github.com/cysmith/neural-style-tf/blob/master/neural_style.py
and https://github.com/leongatys/PytorchNeuralStyleTransfer/blob/master/NeuralStyleTransfer.ipynb
@author: nicolas
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='0' # 1 to remove info, 2 to remove warning and 3 for all
import tensorflow as tf
import scipy.io
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import time
import pickle
import math
from tensorflow.python.client import timeline
from Arg_Parser import get_parser_args
import utils
from numpy.fft import fft2, ifft2
from skimage.color import gray2rgb
import Misc
# Names of the layers in the convolutional part of the VGG19 network (conv/relu/pool)
VGG19_LAYERS = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
#layers = [2 5 10 19 28]; for texture generation
style_layers_size = {'input':3,'conv1' : 64,'relu1' : 64,'pool1': 64,'conv2' : 128,'relu2' : 128,'pool2':128,'conv3' : 256,'relu3' : 256,'pool3':256,'conv4': 512,'relu4' : 512,'pool4':512,'conv5' : 512,'relu5' : 512,'pool5':512}
# TODO : check if the N value are right for the poolx
def plot_image(path_to_image):
"""
Function to plot an image
"""
img = Image.open(path_to_image)
plt.imshow(img)
def get_vgg_layers(VGG19_mat='normalizedvgg.mat'):
"""
Load the VGG 19 layers
"""
VGG19_mat
if(VGG19_mat=='imagenet-vgg-verydeep-19.mat') or (VGG19_mat=='random_net.mat'):
# The vgg19 network from http://www.vlfeat.org/matconvnet/pretrained/
try:
vgg_rawnet = scipy.io.loadmat(VGG19_mat)
vgg_layers = vgg_rawnet['layers'][0]
except(FileNotFoundError):
print("The path to the VGG19_mat is not right or the .mat is not here")
print("You can download it here : http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat")
raise
elif(VGG19_mat=='normalizedvgg.mat') or (VGG19_mat=='zero_net.mat'): # Normalized VGG19 network over images (see Gatys Texture papers) generated by Gang with the Caffe model provide by Gatys
try:
vgg_rawnet = scipy.io.loadmat(VGG19_mat)
vgg_layers = vgg_rawnet['net'][0]['layers'][0][0]
except(FileNotFoundError):
print("The path to the VGG19_mat is not right or the .mat is not here")
print("You have to get the weight from https://github.com/leongatys/DeepTextures and convert them to .mat format.")
raise
else:
print("The path to the VGG19_mat is unknown.")
return(vgg_layers)
def net_preloaded(vgg_layers, input_image,pooling_type='avg',padding='SAME'):
"""
This function reads the VGG layers and creates the net architecture.
We need the input image to know the dimensions of the input layer of the net
"""
net = {}
_,height, width, numberChannels = input_image.shape # In order to have the right shape of the input
current = tf.Variable(np.zeros((1, height, width, numberChannels), dtype=np.float32))
net['input'] = current
for i, name in enumerate(VGG19_LAYERS):
kind = name[:4]
if(kind == 'conv'):
#if(VGG19_mat=='texturesyn_normalizedvgg.mat'):
# Only way to get the weight of the kernel of convolution
# Inspired by http://programtalk.com/vs2/python/2964/facenet/tmp/vggverydeep19.py/
kernels = vgg_layers[i][0][0][2][0][0]
bias = vgg_layers[i][0][0][2][0][1]
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
kernels = tf.constant(np.transpose(kernels, (1,0 ,2, 3)))
bias = tf.constant(bias.reshape(-1))
current = conv_layer(current, kernels, bias,name,padding)
# Update the variable named current to have the right size
elif(kind == 'relu'):
current = tf.nn.relu(current,name=name)
elif(kind == 'pool'):
current = pool_layer(current,name,pooling_type,padding)
net[name] = current
assert len(net) == len(VGG19_LAYERS) +1 # Test if the length is right
return(net)
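# --- Illustrative usage sketch (not invoked anywhere in this script) ---
# A minimal example of how net_preloaded() is meant to be combined with
# get_vgg_layers(); it assumes 'normalizedvgg.mat' is available locally and
# that `some_image` is a preprocessed array of shape (1, h, w, 3).
def _example_build_net(some_image, vgg_mat='normalizedvgg.mat'):
    vgg_layers = get_vgg_layers(vgg_mat)         # load the pretrained weights
    net = net_preloaded(vgg_layers, some_image)  # build the graph for this image size
    sess = tf.Session()
    sess.run(net['input'].assign(some_image))    # the assign also sets the input variable
    features = sess.run(net['conv4_2'])          # responses of an intermediate layer
    sess.close()
    return features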
def conv_layer(input, weights, bias,name,padding='SAME'):
"""
This function creates a conv2d layer with the already known weights and bias
conv2d :
Computes a 2-D convolution given 4-D input and filter tensors.
input: A Tensor. Must be one of the following types: half, float32, float64
Given an input tensor of shape [batch, in_height, in_width, in_channels] and
a filter / kernel tensor of shape
[filter_height, filter_width, in_channels, out_channels]
"""
stride = 1
if(padding=='SAME'):
conv = tf.nn.conv2d(input, weights, strides=(1, stride, stride, 1),
padding=padding,name=name)
elif(padding=='VALID'):
input = get_img_2pixels_more(input)
conv = tf.nn.conv2d(input, weights, strides=(1, stride, stride, 1),
padding='VALID',name=name)
# We need to impose the weights as constant in order to avoid their modification
# when we will perform the optimization
return(tf.nn.bias_add(conv, bias))
def get_img_2pixels_more(input):
new_input = tf.concat([input,input[:,0:2,:,:]],axis=1)
new_input = tf.concat([new_input,new_input[:,:,0:2,:]],axis=2)
return(new_input)
def pool_layer(input,name,pooling_type='avg',padding='SAME'):
"""
Average pooling on windows 2*2 with stride of 2
input is a 4D Tensor of shape [batch, height, width, channels]
Each pooling op uses rectangular windows of size ksize separated by offset
strides in the avg_pool function
"""
stride_pool = 2
if(padding== 'VALID'): # TODO: test whether the size is even or odd !!!
_,h,w,_ = input.shape
if not(h%2==0):
input = tf.concat([input,input[:,0:2,:,:]],axis=1)
if not(w%2==0):
input = tf.concat([input,input[:,:,0:2,:]],axis=2)
if pooling_type == 'avg':
pool = tf.nn.avg_pool(input, ksize=(1, 2, 2, 1), strides=(1, stride_pool, stride_pool, 1),
padding=padding,name=name)
elif pooling_type == 'max':
pool = tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, stride_pool, stride_pool, 1),
padding=padding,name=name)
return(pool)
def sum_content_losses(sess, net, dict_features_repr,M_dict,content_layers):
"""
Compute the content term of the loss function
Input :
- the tensorflow session sess
- the vgg19 net
- the dictionary of the content image representation computed with the net
"""
length_content_layers = float(len(content_layers))
weight_help_convergence = 10**9 # Need to multiply by 120000 ?
content_loss = 0
for layer, weight in content_layers:
M = M_dict[layer[:5]]
P = tf.constant(dict_features_repr[layer])
F = net[layer]
content_loss += tf.nn.l2_loss(tf.subtract(P,F))*(
weight*weight_help_convergence/(length_content_layers*(tf.to_float(M)**2)))
return(content_loss)
def sum_style_losses(sess, net, dict_gram,M_dict,style_layers):
"""
Compute the style term of the loss function with Gram Matrix from the
Gatys Paper
Input :
- the tensorflow session sess
- the vgg19 net
- the dictionary of Gram matrices
- the dictionary of the sizes of the content image through the net
"""
# Info for the vgg19
length_style_layers = float(len(style_layers))
weight_help_convergence = 10**(9) # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0
for layer, weight in style_layers:
# For one layer
N = style_layers_size[layer[:5]]
A = dict_gram[layer]
A = tf.constant(A)
# Get the value of this layer with the generated image
M = M_dict[layer[:5]]
x = net[layer]
G = gram_matrix(x,N,M) # Nota Bene : the Gram matrix is normalized by M
style_loss = tf.nn.l2_loss(tf.subtract(G,A)) # output = sum(t ** 2) / 2
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
return(total_style_loss)
def texture_loss_wt_mask(sess, net, dict_gram,M_dict,mask_dict,style_layers):
"""
Style (Gram) loss where the Gram-matrix difference at each layer is multiplied element-wise by a mask
"""
# Info for the vgg19
length_style_layers = float(len(style_layers))
weight_help_convergence = 10**(9) # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0
for layer, weight in style_layers:
# For one layer
N = style_layers_size[layer[:5]]
NN = N**2
A = dict_gram[layer]
A = tf.constant(A)
# Get the value of this layer with the generated image
M = M_dict[layer[:5]]
x = net[layer]
G = gram_matrix(x,N,M) # Nota Bene : the Gram matrix is normalized by M
diff = tf.subtract(G,A)
mask = mask_dict[layer]
diff = tf.multiply(diff,mask)
NN = np.sum(mask)
print("Number of non null element in mask ",NN)
style_loss = tf.nn.l2_loss(diff) # output = sum(t ** 2) / 2
style_loss *= weight * weight_help_convergence / (2.*(NN)*length_style_layers)
total_style_loss += style_loss
return(total_style_loss)
def compute_4_moments(x):
"""
Compute the first 4 moments of the features (responses of the kernels)
of a 4D tensor
"""
# Note: these are biased sample moments
mean_x = tf.reduce_mean(x, axis=[0,1,2])
variance_x = tf.subtract(tf.reduce_mean(tf.pow(x,2), axis=[0,1,2]),tf.pow(mean_x,2)) # E[x^2] - (E[x])^2
sig_x = tf.sqrt(variance_x)
skewness_x = tf.reduce_mean(tf.pow(tf.divide(tf.subtract(x,mean_x),sig_x),3), axis=[0,1,2])
kurtosis_x = tf.reduce_mean(tf.pow(tf.divide(tf.subtract(x,mean_x),sig_x),4), axis=[0,1,2])
return(mean_x,variance_x,skewness_x,kurtosis_x)
def compute_n_moments_reduce(x,n,axis=[0,1,2]):
"""
Compute the first n moments of the features (responses of the kernels).
The moments are centred and reduced, but be careful: for some textures and layers there is
a risk of getting a NaN as a result because sig_x can go to 0; otherwise use compute_n_moments
"""
assert(n > 0)
mean_x = tf.reduce_mean(x,axis=axis)
list_of_moments = [mean_x]
if(n>1):
variance_x = tf.reduce_mean(tf.pow(tf.subtract(x,mean_x),2),axis=axis)
list_of_moments += [variance_x]
sig_x = tf.sqrt(variance_x)
if(n>2):
for r in range(3,n+1,1):
moment_r = tf.reduce_mean(tf.pow(tf.divide(tf.subtract(x,mean_x),sig_x),r), axis=axis) # Centred/reduced moment
moment_r = tf.where(tf.is_nan(moment_r), tf.zeros_like(moment_r), moment_r ) # replace NaN by 0
list_of_moments += [moment_r]
return(list_of_moments)
def compute_n_moments(x,n,axis=[0,1,2]):
"""
Compute the first n moments of the features (responses of the kernels).
Only the centred moments, not the centred-reduced ones! Otherwise there is a risk of NaN values because of sig_x
"""
assert(n > 0)
mean_x = tf.reduce_mean(x,axis=axis)
list_of_moments = [mean_x]
if(n>1):
variance_x = tf.reduce_mean(tf.pow(tf.subtract(x,mean_x),2),axis=axis)
list_of_moments += [variance_x]
if(n>2):
for r in range(3,n+1,1):
#sig_x = tf.sqrt(variance_x)
sig_x = 1.
moment_r = tf.reduce_mean(tf.pow(tf.divide(tf.subtract(x,mean_x),sig_x),r), axis=axis) # Centred moment
moment_r = tf.where(tf.is_nan(moment_r), tf.zeros_like(moment_r), moment_r ) # replace NaN by 0
# TODO : make this more efficient, the power is computed several times
list_of_moments += [moment_r]
return(list_of_moments)
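# --- Illustrative NumPy sketch (not used by the optimization) ---
# compute_n_moments() returns, per feature channel, [mean, variance, 3rd centred
# moment, ...]. The helper below mirrors that definition for n=3 on a random
# array; it is only a sanity illustration of the statistics being matched.
def _example_centred_moments_numpy(n=3):
    x = np.random.rand(1, 8, 8, 4).astype('float32')  # fake feature map (1,h,w,channels)
    mean = x.mean(axis=(0, 1, 2))
    var = ((x - mean) ** 2).mean(axis=(0, 1, 2))
    m3 = ((x - mean) ** 3).mean(axis=(0, 1, 2))       # centred (not reduced) 3rd moment
    return [mean, var, m3][:n]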
def compute_Lp_norm(x,p):
"""
Compute the first p Lp norms of the features
"""
assert(p > 0)
list_of_Lp = []
for r in range(1,p+1,1):
L_r_x = tf.pow(tf.reduce_mean(tf.pow(tf.abs(x),r), axis=[0,1,2]),1./r)
#F_x = tf.reshape(x,[M_i_1,N_i_1])
#L_r_x =tf.norm(x,ord=r,axis=[0,1],name=str(r))
# TODO : change that to some thing more optimal : pb computation of the power several times
list_of_Lp += [L_r_x]
return(list_of_Lp)
def sum_style_stats_loss(sess,net,image_style,M_dict,style_layers):
"""
Compute a loss that is the l2 norm of the difference of the first 4 moments between the style image and the generated image
"""
length_style_layers = float(len(style_layers))
weight_help_convergence = 10**9 # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
# For one layer
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer] # response to the layer
mean_x,variance_x,skewness_x,kurtosis_x = compute_4_moments(x)
mean_a,variance_a,skewness_a,kurtosis_a = compute_4_moments(a)
style_loss = tf.nn.l2_loss(tf.subtract(mean_x,mean_a)) + tf.nn.l2_loss(tf.subtract(variance_x,variance_a)) + tf.nn.l2_loss(tf.subtract(skewness_x,skewness_a)) + tf.nn.l2_loss(tf.subtract(kurtosis_x,kurtosis_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
return(total_style_loss)
def loss_n_moments(sess,net,image_style,M_dict,n,style_layers):
"""
Compute a loss that is the l2 norm of the difference of the first n moments between the style image and the generated image
"""
length_style_layers = float(len(style_layers))
weight_help_convergence = 10**9 # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
# For one layer
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer] # response to the layer
moments_x = compute_n_moments(x,n)
moments_a = compute_n_moments(a,n)
style_loss = sum(map(tf.nn.l2_loss,map(tf.subtract, moments_x,moments_a)))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers) # Normalized by the number of features N
total_style_loss += style_loss
return(total_style_loss)
def loss_n_stats(sess,net,image_style,M_dict,n,style_layers,TypeOfComputation='moments'):
"""
Compute a loss that is the l2 norm of the difference of the first n elements of a statistic on
the feature maps: moments or Lp norms
"""
length_style_layers = float(len(style_layers))
weight_help_convergence = 10**(9)
# Because the function is pretty flat
total_style_loss = 0
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
# For one layer
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer] # response to the layer
if(TypeOfComputation=='moments'):
stats_x = compute_n_moments(x,n)
stats_a = compute_n_moments(a,n)
elif(TypeOfComputation=='Lp'):
stats_x = compute_Lp_norm(x,n)
stats_a = compute_Lp_norm(a,n)
elif(TypeOfComputation=='nmoments_reduce'):
stats_x = compute_n_moments_reduce(x,n)
stats_a = compute_n_moments_reduce(a,n)
style_loss = sum(map(tf.nn.l2_loss,map(tf.subtract, stats_x,stats_a)))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers) # Normalized by the number of features N
total_style_loss += style_loss
return(total_style_loss)
def loss_variance(sess,net,image_style,M_dict,style_layers):
"""
Compute a loss that is the l2 norm of the difference of variances between the two
images (the reference and the one we are optimizing)
"""
length_style_layers = float(len(style_layers))
weight_help_convergence = 10**(9)
# Because the function is pretty flat
total_style_loss = 0
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
# For one layer
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer] # response to the layer
axis = [0,1,2]
mean_x,variance_x = tf.nn.moments(x,axes=axis)
mean_a,variance_a = tf.nn.moments(tf.convert_to_tensor(a, dtype=tf.float32),axes=axis)
style_loss = tf.nn.l2_loss(tf.subtract(variance_x,variance_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers) # Normalized by the number of features N
total_style_loss += style_loss
return(total_style_loss)
def loss_p_norm(sess,net,image_style,M_dict,p,style_layers): # TODO: write a generic function that takes the norm type as input !!!
length_style_layers = float(len(style_layers))
weight_help_convergence = 10**9 # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
# For one layer
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer] # response to the layer
L_p_x = compute_Lp_norm(x,p) # The Lp norms are positive, here we just try to equalize the energies
L_p_a = compute_Lp_norm(a,p)
style_loss = sum(map(tf.nn.l2_loss,map(tf.subtract, L_p_x,L_p_a)))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers) # Normalized by the number of features N
total_style_loss += style_loss
return(total_style_loss)
def loss_crosscor_inter_scale(sess,net,image_style,M_dict,style_layers,sampling='down',pooling_type='avg'):
"""
Compute a loss that is the l2 norm of the cross-correlation with the previous band.
The sampling argument is 'down' for downsampling and 'up' for upsampling
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9 # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0.
sess.run(net['input'].assign(image_style))
if(length_style_layers_int > 1):
for i in range(length_style_layers_int-1):
layer_i, weight_i = style_layers[i]
layer_i_1, weight_i_1 = style_layers[i+1]
N_i = style_layers_size[layer_i[:5]]
N_i_1 = style_layers_size[layer_i_1[:5]]
M_i_1 = M_dict[layer_i_1[:5]]
#print("M_i,M_i_1,N_i",M_i,M_i_1,N_i)
x_i = net[layer_i]
x_i_1 = net[layer_i_1]
a_i = sess.run(net[layer_i])
a_i_1 = sess.run(net[layer_i_1]) # TODO: this is suboptimal because a_i is computed twice
if(sampling=='down'):
if(pooling_type=='avg'):
x_i = tf.nn.avg_pool(x_i, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),padding='SAME')
a_i = tf.nn.avg_pool(a_i, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),padding='SAME')
elif(pooling_type == 'max'):
x_i = tf.nn.max_pool(x_i, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),padding='SAME')
a_i = tf.nn.max_pool(a_i, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),padding='SAME')
_,height,width,_ = x_i.shape
M_i = tf.to_int32(height*width)
elif(sampling=='up'):
_,new_height,new_width,_ = x_i.shape
_,height,_,_ = x_i_1.shape
if(layer_i[:5]==layer_i_1[:5]):
factor = 1 # Not upsample
else:
factor = 2
upsample_filter_np = utils.bilinear_upsample_weights(factor,N_i_1)
x_i_1 = tf.nn.conv2d_transpose(x_i_1, upsample_filter_np,
output_shape=[1, tf.to_int32(new_height), tf.to_int32(new_width), N_i_1],
strides=[1, factor, factor, 1])
a_i_1 = tf.nn.conv2d_transpose(a_i_1, upsample_filter_np,
output_shape=[1, tf.to_int32(new_height), tf.to_int32(new_width), N_i_1],
strides=[1, factor, factor, 1])
M_i = tf.to_int32(new_height*new_width)
M_i_1 = M_i
F_x_i = tf.reshape(x_i,[M_i,N_i])
F_x_i_1 = tf.reshape(x_i_1,[M_i_1,N_i_1])
G_x = tf.matmul(tf.transpose(F_x_i),F_x_i_1)
G_x /= tf.to_float(M_i)
F_a_i = tf.reshape(a_i,[M_i,N_i])
F_a_i_1 = tf.reshape(a_i_1,[M_i_1,N_i_1])
G_a = tf.matmul(tf.transpose(F_a_i),F_a_i_1)
G_a /= tf.to_float(M_i)
style_loss = tf.nn.l2_loss(tf.subtract(G_x,G_a)) # output = sum(t ** 2) / 2
# TODO: depending on the desired kind of style transfer, either reshape the style image or use Mcontent/Mstyle
weight= (weight_i + weight_i_1) /2.
style_loss *= weight * weight_help_convergence / (2.*(N_i*N_i_1)*length_style_layers)
total_style_loss += style_loss
return(total_style_loss)
def pgcd(a,b) :
while a%b != 0 :
a, b = b, a%b
return b
def loss_autocorrbizarre(sess,net,image_style,M_dict,style_layers):
"""
Computation of the autocorrelation of the filters
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9
total_style_loss = 0.
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
F_x = tf.fft2d(tf.complex(x,0.))
R_x = tf.real(tf.multiply(F_x,tf.conj(F_x)))
R_x /= tf.to_float(M**2)
F_a = tf.fft2d(tf.complex(a,0.))
R_a = tf.real(tf.multiply(F_a,tf.conj(F_a)))
R_a /= tf.to_float(M**2)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_autocorr(sess,net,image_style,M_dict,style_layers):
"""
Computation of the autocorrelation of the filters
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = (10**9)
total_style_loss = 0.
_, h_a, w_a, N = image_style.shape
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x = tf.transpose(x, [0,3,1,2])
a = tf.transpose(a, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x,0.))
R_x = tf.real(tf.multiply(F_x,tf.conj(F_x))) # Squared modulus of the Fourier transform: element-wise product
R_x /= tf.to_float(M**2) # Normalization of the FT modulus
F_a = tf.fft2d(tf.complex(a,0.))
R_a = tf.real(tf.multiply(F_a,tf.conj(F_a))) # Squared modulus of the Fourier transform
R_a /= tf.to_float(M**2)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_autocorrLog(sess,net,image_style,M_dict,style_layers):
"""
Computation of the autocorrelation of the filters, normalized by going
through the log
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = (10**9)
total_style_loss = 0.
_, h_a, w_a, N = image_style.shape
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x = tf.transpose(x, [0,3,1,2])
a = tf.transpose(a, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x,0.))
R_x = tf.pow(tf.real(tf.multiply(F_x,tf.conj(F_x))),0.5) # Modulus of the Fourier transform: element-wise product
R_x /= tf.to_float(M) # Normalization of the FT modulus
R_x = tf.log(1+R_x)
F_a = tf.fft2d(tf.complex(a,0.))
R_a = tf.pow(tf.real(tf.multiply(F_a,tf.conj(F_a))),0.5) # Modulus of the Fourier transform
R_a /= tf.to_float(M)
R_a = tf.log(1+R_a)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_autocorr_rfft(sess,net,image_style,M_dict,style_layers):
"""
Computation of the autocorrelation of the filters
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = (10**9)
total_style_loss = 0.
_, h_a, w_a, N = image_style.shape
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x = tf.transpose(x, [0,3,1,2])
a = tf.transpose(a, [0,3,1,2])
F_x = tf.spectral.rfft2d(x)
F_a = tf.spectral.rfft2d(a)
#R_x = tf.abs(F_x) # Modulus of the Fourier transform
#R_x /= tf.to_float(M) # Normalization of the FT modulus
#R_a = tf.abs(F_a) # Modulus of the Fourier transform
#R_a /= tf.to_float(M)
R_x = tf.real(tf.multiply(F_x,tf.conj(F_x))) # Squared modulus of the Fourier transform: element-wise product
R_x /= tf.to_float(M**2) # Normalization of the FT modulus
R_a = tf.real(tf.multiply(F_a,tf.conj(F_a))) # Squared modulus of the Fourier transform
R_a /= tf.to_float(M**2)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_fft_vect(sess,net,image_style,M_dict,style_layers):
"""
Computation of the autocorrelation of the filters considered as vectors,
a kind of vectorized FFT
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = (10**9)
total_style_loss = 0.
_, h_a, w_a, N = image_style.shape
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x = tf.transpose(x, [0,3,1,2])
a = tf.transpose(a, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x,0.))
R_x = tf.real(tf.multiply(F_x,tf.conj(F_x))) # Squared modulus of the Fourier transform: element-wise product
R_x /= tf.to_float(M**2) # Normalization of the FT modulus
R_x = tf.reduce_mean(R_x,axis=1)
F_a = tf.fft2d(tf.complex(a,0.))
R_a = tf.real(tf.multiply(F_a,tf.conj(F_a))) # Squared modulus of the Fourier transform
R_a /= tf.to_float(M**2)
R_a = tf.reduce_mean(R_a,axis=1)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_entropy(sess,net,image_style,M_dict,style_layers):
"""
Computation of the entropy of the filters
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = (10**9)
total_style_loss = 0.
_, h_a, w_a, N = image_style.shape
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
a = tf.nn.l2_normalize(a, 3)
x = tf.nn.l2_normalize(x, 3)
entropy_a = tf.reduce_mean(-tf.multiply(a,tf.log(a)),axis=(0,1,2))
entropy_x = tf.reduce_mean(-tf.multiply(x,tf.log(x)),axis=(0,1,2))
style_loss = tf.nn.l2_loss(tf.subtract(entropy_x,entropy_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def compute_ImagePhaseAlea(sess,net,image_style,M_dict,style_layers):
"""
Add a random phase to the features of the image style at the last
layer in the network
"""
sess.run(net['input'].assign(image_style))
image_style_PhaseAlea = {}
layer, weight = style_layers[-1]
a = sess.run(net[layer])
b, h_a, w_a, N = a.shape
at = tf.transpose(a, [0,3,1,2])
F_a = tf.fft2d(tf.complex(at,0.))
white_noise = np.random.normal(loc=0.0, scale=1.0, size=(h_a, w_a)).astype('float32') # The white noise must be Gaussian
F_white_noise = tf.fft2d(tf.complex(white_noise,0.))
F_white_noise_modulus_inverse = tf.pow(tf.multiply(F_white_noise,tf.conj(F_white_noise)),-0.5)
F_white_noise_phase = tf.multiply(F_white_noise,F_white_noise_modulus_inverse) # Respect Hermitian symmetry
#F_white_noise_phase = np.ones((h_a, w_a))
output_list = []
for i in range(N):
output_list.append(tf.multiply(F_a[0,i,:,:],F_white_noise_phase))
F_a_new_phase = tf.stack(output_list)
F_a_new_phase = tf.expand_dims(F_a_new_phase,axis=0)
imF = tf.ifft2d(F_a_new_phase)
imF = tf.real(tf.transpose(imF, [0,2,3,1]))
image_style_PhaseAlea[layer] = sess.run(imF)
# Check that the modulus of the style image features at the last layer is indeed preserved
imF_t = tf.transpose(imF, [0,3,1,2])
F_imF_t = tf.fft2d(tf.complex(imF_t,0.))
F_a_modulus = tf.real(tf.multiply(F_a,tf.conj(F_a)))
F_a_modulus_num = sess.run(F_a_modulus)
F_a_modulus_max = np.max(F_a_modulus_num)
imF_modulus = tf.real(tf.multiply(F_imF_t,tf.conj(F_imF_t)))
diff = tf.subtract(F_a_modulus,imF_modulus)
diff = sess.run(diff)
max_diff = np.max(diff)/F_a_modulus_max
if(max_diff > 10**(-4)):
print("The modulus of the image style features at the last layer is not preserved ! max_diff =",max_diff)
raise Exception
return(image_style_PhaseAlea)
def loss_PhaseAleatoire(sess,net,image_style,image_style_PhaseAlea,M_dict,style_layers):
"""
In this loss function we impose the randomly re-phased style features at the last
layer, and only the spectrum (Fourier modulus) at the other layers
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9
total_style_loss = 0.
last_style_layers,_ = style_layers[-1]
alpha = 1
for layer, weight in style_layers:
if(last_style_layers==layer):
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
x = net[layer]
a_phase_alea = image_style_PhaseAlea[layer]
loss = tf.nn.l2_loss(tf.subtract(x,a_phase_alea))
loss *= alpha * weight * weight_help_convergence /(2.*(N**2)*tf.to_float(M**2)*length_style_layers)
total_style_loss += loss
else:
sess.run(net['input'].assign(image_style))
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x = tf.transpose(x, [0,3,1,2])
a = tf.transpose(a, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x,0.))
R_x = tf.real(tf.multiply(F_x,tf.conj(F_x))) # Squared modulus of the Fourier transform: element-wise product
R_x /= tf.to_float(M**2) # Normalization of the FT modulus
F_a = tf.fft2d(tf.complex(a,0.))
R_a = tf.real(tf.multiply(F_a,tf.conj(F_a))) # Squared modulus of the Fourier transform
R_a /= tf.to_float(M**2)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_PhaseAleatoireSimple(sess,net,image_style,image_style_PhaseAlea,M_dict,style_layers):
"""
In this loss function we only impose the randomly re-phased style features
at the last layer, nothing else
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9
total_style_loss = 0.
last_style_layers,_ = style_layers[-1]
alpha = 1
for layer, weight in style_layers:
if(last_style_layers==layer):
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
x = net[layer]
a_phase_alea = image_style_PhaseAlea[layer]
loss = tf.nn.l2_loss(tf.subtract(x,a_phase_alea))
loss *= alpha * weight * weight_help_convergence /(2.*(N**2)*tf.to_float(M**2)*length_style_layers)
total_style_loss += loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def compute_ImagePhaseAlea_list(sess,net,image_style,M_dict,style_layers):
"""
Add a random phase to the features of the style image at every style
layer in the network
"""
b, h, w, N = image_style.shape
white_noise_img_bgr = np.random.normal(loc=0.0, scale=1.0, size=(h, w)).astype('float32')
white_noise_img_bgr = np.stack((white_noise_img_bgr,white_noise_img_bgr,white_noise_img_bgr),axis=2)
white_noise_img_bgr_tf = preprocess(white_noise_img_bgr)
sess.run(net['input'].assign(white_noise_img_bgr_tf))
dict = {}
for layer,_ in style_layers:
white_noise = sess.run(net[layer])
white_noise = tf.transpose(white_noise, [0,3,1,2])
F_white_noise = tf.fft2d(tf.complex(white_noise,0.))
F_white_noise_modulus_inverse = tf.pow(tf.multiply(F_white_noise,tf.conj(F_white_noise)),-0.5)
F_white_noise_phase = tf.multiply(F_white_noise,F_white_noise_modulus_inverse) # Respect Hermitian symmetry
dict[layer] = F_white_noise_phase
sess.run(net['input'].assign(image_style))
image_style_PhaseAlea = {}
for layer,_ in style_layers:
a = sess.run(net[layer])
at = tf.transpose(a, [0,3,1,2])
F_a = tf.fft2d(tf.complex(at,0.))
F_a_new_phase = tf.multiply(F_a,dict[layer])
imF = tf.ifft2d(F_a_new_phase)
imF = tf.real(tf.transpose(imF, [0,2,3,1]))
image_style_PhaseAlea[layer] = sess.run(imF)
return(image_style_PhaseAlea)
def loss_PhaseAleatoirelist(sess,net,image_style,image_style_PhaseAlea,M_dict,style_layers):
"""
In this loss function we impose the randomly re-phased style features
at every style layer (see compute_ImagePhaseAlea_list)
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9
total_style_loss = 0.
alpha = 1
print("Phase aleatoire List !")
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
x = net[layer]
a_phase_alea = image_style_PhaseAlea[layer]
loss = tf.nn.l2_loss(tf.subtract(x,a_phase_alea))
loss *= alpha * weight * weight_help_convergence /(2.*(N**2)*tf.to_float(M**2)*length_style_layers)
total_style_loss += loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_PhaseImpose(sess,net,image_style,M_dict,style_layers):
"""
TODO !!!
"""
print("Here ")
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9
total_style_loss = 0.
last_style_layers,_ = style_layers[0]
print(last_style_layers)
sess.run(net['input'].assign(image_style))
alpha = 10**(13)
for layer, weight in style_layers:
# constraint on the modulus only
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x_t = tf.transpose(x, [0,3,1,2])
a_t = tf.transpose(a, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x_t,0.))
F_a = tf.fft2d(tf.complex(a_t,0.))
if(last_style_layers==layer):
## Constraint on the phase
#angle_a = angle(F_a)
##angle_a_shiftdim1 = tf.concat([tf.expand_dims(angle_a[:,-1,:,:],0), angle_a[:,:-1,:,:]], axis=1)
##angle_a_prim = angle_a - angle_a_shiftdim1
##angle_a_shiftdim2 = tf.concat([tf.expand_dims(angle_a_prim[:,:,-1,:],axis=2), angle_a_prim[:,:,:-1,:]], axis=2)
##angle_a_prim = angle_a_prim - angle_a_shiftdim2
##angle_a_shiftdim3 = tf.concat([tf.expand_dims(angle_a_prim[:,:,:,-1],axis=3), angle_a_prim[:,:,:,:-1]], axis=3)
##angle_a_prim = angle_a_prim - angle_a_shiftdim3
#angle_x = angle(F_x)
##angle_x_shiftdim1 = tf.concat([tf.expand_dims(angle_x[:,-1,:,:],0), angle_x[:,:-1,:,:]], axis=1)
##angle_x_prim = angle_x - angle_x_shiftdim1
##angle_x_shiftdim2 = tf.concat([tf.expand_dims(angle_x_prim[:,:,-1,:],axis=2), angle_x_prim[:,:,:-1,:]], axis=2)
##angle_x_prim = angle_x_prim - angle_x_shiftdim2
##angle_x_shiftdim3 = tf.concat([tf.expand_dims(angle_x_prim[:,:,:,-1],axis=3), angle_x_prim[:,:,:,:-1]], axis=3)
##angle_x_prim = angle_x_prim - angle_x_shiftdim3
#angle_x /= tf.to_float(M)
#angle_a /= tf.to_float(M)
#style_loss = tf.nn.l2_loss(tf.subtract(angle_x,angle_a))
#style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
#total_style_loss += style_loss
# Phase correlation?
#innerProd = tf.multiply(F_x,tf.conj(F_a)) # sum(ftIm .* conj(ftRef), 3);
#innerProd /= M**2
#module_InnerProd = tf.pow(tf.real(tf.multiply(innerProd,tf.conj(innerProd))),0.5)
#module_innerProd_less_1 = tf.pow(tf.pow(tf.real(tf.multiply(innerProd,tf.conj(innerProd)))-1,2),0.5)
#style_loss = tf.reduce_sum(tf.multiply(module_InnerProd,module_innerProd_less_1))
angle_x = tf.real(angle(F_x))
angle_a = tf.real(angle(F_a))
fft2_angle_x = tf.fft2d(tf.complex(angle_x,0.))
fft2_angle_a = tf.fft2d(tf.complex(angle_a,0.))
R_angle_x = tf.real(tf.multiply(fft2_angle_x,tf.conj(fft2_angle_x)))
R_angle_a = tf.real(tf.multiply(fft2_angle_a,tf.conj(fft2_angle_a)))
R_angle_a /= tf.to_float(M**2)
R_angle_x /= tf.to_float(M**2)
style_loss = tf.nn.l2_loss(tf.subtract(R_angle_x,R_angle_a))
#dephase = tf.divide(innerProd,module_InnerProd)
#ftNew = tf.multiply(dephase,F_x)
#imF = tf.ifft2d(ftNew)
#imF = tf.real(tf.transpose(imF, [0,2,3,1]))
#loss = tf.nn.l2_loss(tf.subtract(x,imF)) # sum (x**2)/2
style_loss *= alpha* weight * weight_help_convergence /((2.*(N**2)*length_style_layers))
total_style_loss += style_loss
if True:
R_x = tf.real(tf.multiply(F_x,tf.conj(F_x))) # Squared modulus of the Fourier transform: element-wise product
R_x /= tf.to_float(M) # Normalization of the FT modulus
R_a = tf.real(tf.multiply(F_a,tf.conj(F_a))) # Squared modulus of the Fourier transform
R_a /= tf.to_float(M)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def angle(z):
"""
Returns the elementwise arctan of z, choosing the quadrant correctly.
Quadrant I: arctan(y/x)
Quadrant II: π + arctan(y/x) (phase of x<0, y=0 is π)
Quadrant III: -π + arctan(y/x)
Quadrant IV: arctan(y/x)
Inputs:
z: tf.complex64 or tf.complex128 tensor
Returns:
Angle of z
"""
if z.dtype == tf.complex128:
dtype = tf.float64
else:
dtype = tf.float32
x = tf.real(z)
y = tf.imag(z)
xneg = tf.cast(x < 0.0, dtype)
yneg = tf.cast(y < 0.0, dtype)
ypos = tf.cast(y >= 0.0, dtype)
offset = xneg * (ypos - yneg) * np.pi
return tf.atan(y / x) + offset
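# --- Illustrative sanity check (not called in this script) ---
# angle() is meant to reproduce the usual complex argument; the NumPy
# equivalent is np.angle (or np.arctan2(imag, real)). The helper below only
# sketches that correspondence on one toy value per quadrant.
def _example_angle_reference():
    z = np.array([1 + 1j, -1 + 1j, -1 - 1j, 1 - 1j], dtype=np.complex64)
    return np.angle(z)  # approx. [pi/4, 3*pi/4, -3*pi/4, -pi/4]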
def loss_intercorr(sess,net,image_style,M_dict,style_layers):
"""
Computation of the correlation of the filters and of the long-distance
interaction of the features: intercorrelation (cross-correlation)
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = (10**9)
total_style_loss = 0.
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
print(layer)
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x = tf.transpose(x, [0,3,1,2])
a = tf.transpose(a, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x,0.))
F_x_conj = tf.conj(F_x)
F_a = tf.fft2d(tf.complex(a,0.))
F_a_conj = tf.conj(F_a)
#NN = 2
#alpha = 10
#R_x = tf.real(tf.ifft2d(tf.multiply(F_x,F_x_conj)))
#R_a = tf.real(tf.ifft2d(tf.multiply(F_a,F_a_conj)))
#R_x /= tf.to_float(M**2)
#R_a /= tf.to_float(M**2)
#style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
#style_loss *= alpha * weight * weight_help_convergence / (2.*(NN**4)*length_style_layers)
#total_style_loss += style_loss
#lenRoll = sess.run(tf.random_uniform(minval=0,maxval=N,dtype=tf.int32,shape=[1])) # Between [minval,maxval)
#print(lenRoll)
#F_x = tf.concat([F_x[:,lenRoll:,:,:], F_x[:,:lenRoll,:,:]], axis=1)
#F_a = tf.concat([F_a[:,lenRoll:,:,:], F_a[:,:lenRoll,:,:]], axis=1)
#R_x = tf.real(tf.ifft2d(tf.multiply(F_x,F_x_conj)))
#R_a = tf.real(tf.ifft2d(tf.multiply(F_a,F_a_conj)))
#R_x /= tf.to_float(M**2)
#R_a /= tf.to_float(M**2)
#style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
#style_loss *= weight * weight_help_convergence / (2.*(NN**4)*length_style_layers)
#total_style_loss += style_loss
#lenRoll = sess.run(tf.random_uniform(minval=0,maxval=N,dtype=tf.int32,shape=[1]))
#print(lenRoll)
NN = N
for i in range(NN):
R_x = tf.real(tf.ifft2d(tf.multiply(F_x,F_x_conj)))
R_a = tf.real(tf.ifft2d(tf.multiply(F_a,F_a_conj)))
R_x /= tf.to_float(M**2)
R_a /= tf.to_float(M**2)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(NN**4)*length_style_layers)
total_style_loss += style_loss
#F_x = tf.stack([F_x[:,-1,:,:], F_x[:,:-1,:,:]], axis=1)
F_x = tf.concat([tf.expand_dims(F_x[:,-1,:,:],0), F_x[:,:-1,:,:]], axis=1)
#F_a = tf.stack([F_a[:,-1,:,:], F_a[:,:-1,:,:]], axis=1)
F_a = tf.concat([tf.expand_dims(F_a[:,-1,:,:],0), F_a[:,:-1,:,:]], axis=1)
return(total_style_loss)
def loss_SpectrumOnFeatures(sess,net,image_style,M_dict,style_layers):
"""
In this loss function we impose the spectrum on each feature map
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9
total_style_loss = 0.
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
x = net[layer]
x_transpose = tf.transpose(x, [0,3,1,2])
a = tf.transpose(a, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x_transpose,0.))
F_a = tf.fft2d(tf.complex(a,0.))
innerProd = tf.multiply(F_x,tf.conj(F_a)) # sum(ftIm .* conj(ftRef), 3);
module_InnerProd = tf.pow(tf.multiply(innerProd,tf.conj(innerProd)),0.5)
dephase = tf.divide(innerProd,module_InnerProd)
ftNew = tf.multiply(dephase,F_x)
imF = tf.ifft2d(ftNew)
imF = tf.real(tf.transpose(imF, [0,2,3,1]))
loss = tf.nn.l2_loss(tf.subtract(x,imF)) # sum (x**2)/2
loss *= weight * weight_help_convergence /(M*3*(2.*(N**2)*length_style_layers))
total_style_loss += loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_fft3D(sess,net,image_style,M_dict,style_layers):
"""
Computation of the 3-dimensional discrete Fourier Transform over the
inner-most 3 dimensions of input i.e. height,width,channel :)
"""
# TODO : change the M value attention !!! different size between a and x maybe
length_style_layers_int = len(style_layers)
length_style_layers = float(length_style_layers_int)
weight_help_convergence = 10**9
total_style_loss = 0.
x_temp = {}
sess.run(net['input'].assign(image_style))
for layer, weight in style_layers:
N = style_layers_size[layer[:5]]
M = M_dict[layer[:5]]
a = sess.run(net[layer])
#R_a = (ifft2(fft2(a) * fft2(a).conj()).real)/M
#R_x = x_temp[layer]
x = net[layer]
F_x = tf.fft3d(tf.complex(x,0.))
#print(F_x.shape)
R_x = tf.real(tf.multiply(F_x,tf.conj(F_x)))
R_x /= tf.to_float(M**2)
#print(R_x.shape)
F_a = tf.fft3d(tf.complex(a,0.))
R_a = tf.real(tf.multiply(F_a,tf.conj(F_a)))
R_a /= tf.to_float(M**2)
style_loss = tf.nn.l2_loss(tf.subtract(R_x,R_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
total_style_loss =tf.to_float(total_style_loss)
return(total_style_loss)
def loss_spectrum(sess,net,image_style,M_dict):
"""
Computation of the spectrum loss from Gang Liu
https://arxiv.org/pdf/1605.01141.pdf
"""
eps = 0.001
beta = 10**5 # Value from the paper
M = M_dict['input']
x = net['input']
a = tf.transpose(image_style, [0,3,1,2])
x_t = tf.transpose(x, [0,3,1,2])
F_x = tf.fft2d(tf.complex(x_t,0.))
#F_a = tf.reduce_sum(tf.fft2d(tf.complex(a,0.)),1, keep_dims=True)
F_a = tf.fft2d(tf.complex(a,0.))
#innerProd = tf.reduce_sum( tf.multiply(F_x,tf.conj(F_a)), 1, keep_dims=True ) # sum(ftIm .* conj(ftRef), 3);
innerProd = tf.multiply(F_x,tf.conj(F_a)) # sum(ftIm .* conj(ftRef), 3);
module_InnerProd = tf.pow(tf.multiply(innerProd,tf.conj(innerProd)),0.5)
dephase = tf.divide(innerProd,module_InnerProd+eps)
ftNew = tf.multiply(dephase,F_x)
imF = tf.ifft2d(ftNew)
imF = tf.real(tf.transpose(imF, [0,2,3,1]))
loss = tf.nn.l2_loss(tf.subtract(x,imF)) # sum (x**2)/2
#loss *= beta/(M*3) # Need to be checked by Said or Yann TODO !
loss *= beta/(M*2)
return(loss)
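# --- Illustrative single-channel NumPy sketch of the projection used above ---
# (not part of the graph). It reproduces the operation inside loss_spectrum():
# the Fourier transform of x is multiplied by the unit-modulus factor
# innerProd/|innerProd| built from the reference a, then inverted. The array
# names x2d/a2d are hypothetical single-channel images used only here.
def _example_spectrum_projection(x2d, a2d, eps=0.001):
    F_x = fft2(x2d)
    F_a = fft2(a2d)
    inner = F_x * np.conj(F_a)
    dephase = inner / (np.abs(inner) + eps)
    return np.real(ifft2(dephase * F_x))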
def loss__HF_filter(sess, net, image_style,M_dict):
"""
With this function we add a kind of high-frequency (HF) filter to some layers in order
to remove some artefacts
"""
weight_help_convergence = 10**(9) # This weight comes from a paper by Gatys
sizeKernel = 3
zeros = np.zeros(shape=(sizeKernel,sizeKernel)).astype('float32')
k = np.array([[ 1, -1, 1],
[-1, 1, -1],
[ 1, -1, 1]]).astype('float32')
k /= 9.
# imshow(abs(fftshift(fft2(a,256,256))),[]) to inspect the spectrum in Matlab
total_style_loss = 0.
style_layers_local = [('input',1),('pool1',1),('pool2',1),('pool3',1),('pool4',1)]
length_style_layers = len(style_layers_local)
style_layers_local_size = {'input' : 3,'pool1' : 64,'pool2' : 128,'pool3' : 256,'pool4' : 512}
for layer, weight in style_layers_local:
weight =1.0
input_x = net[layer]
kernels = None
for j in range(style_layers_local_size[layer]):
kernels_tmp = np.zeros(shape=(sizeKernel,sizeKernel,style_layers_local_size[layer])).astype('float32')
kernels_tmp[:,:,j] = k #/math.sqrt(sizeKernel*sizeKernel*style_layers_local_size[layer])
kernels_tmp = np.expand_dims(kernels_tmp,axis=3)
if(kernels is None):
kernels = kernels_tmp
else:
kernels = np.concatenate((kernels,kernels_tmp),axis=3)
_,_,_,N = kernels.shape
kernels = tf.constant(kernels)
conv_x = tf.nn.conv2d(input_x, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
sess.run(net['input'].assign(image_style))
input_a = sess.run(net[layer])
conv_a = tf.nn.conv2d(input_a, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
M = M_dict[layer[:5]]
G_x = gram_matrix(conv_x,N,M)
G_a = gram_matrix(conv_a,N,M)
style_loss = tf.nn.l2_loss(tf.subtract(G_x,G_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
return(total_style_loss)
def loss__HF_many_filters2(sess, net, image_style,M_dict):
"""
Test with other high-pass filters
"""
weight_help_convergence = 10**(9) # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0
zeros = np.zeros(shape=(3,3)).astype('float32')
# X-shaped filter
list_kernel = []
k = np.array([[ 1, -1, 1],
[-1, 1, -1],
[ 1, -1, 1]]).astype('float32')
k = k/9.
list_kernel += [k]
# Filter along x
k = np.array([[ -1, 1, 0],
[-1, 1, 0],
[ -1, 1, 0]]).astype('float32')
k = k/9.
list_kernel += [k]
# Filter along y
k = np.array([[ 0, 0, 0],
[1, 1, 1],
[ -1, -1, -1]]).astype('float32')
k = k/9.
list_kernel += [k]
# Laplacian filter
k = np.array([[ 0, -1/4, 0],
[-1./4., 1, -1./4.],
[ 0, -1./4., 0]]).astype('float32')
k = k/9.
list_kernel += [k]
#k = np.array([[ 0, -1./4., 0],
#[-1./4, 2, -1./4],
#[ 0, -1./4, 0]]).astype('float32')
#k = k/9.
ratio = 1./len(list_kernel)
weight = 10**(-2)
style_layers_local = [('input',1),('pool1',1),('pool2',1),('pool3',1),('pool4',1)]
length_style_layers = len(style_layers_local)
style_layers_local_size = {'input' : 3,'pool1' : 64,'pool2' : 128,'pool3' : 256,'pool4' : 512}
for layer, weight in style_layers_local:
for kernel in list_kernel:
k = kernel
input_x = net[layer]
kernels = None
for j in range(style_layers_local_size[layer]):
kernels_tmp = np.zeros(shape=(3,3,style_layers_local_size[layer])).astype('float32')
kernels_tmp[:,:,j] = k
kernels_tmp = np.expand_dims(kernels_tmp,axis=3)
if(kernels is None):
kernels = kernels_tmp
else:
kernels = np.concatenate((kernels,kernels_tmp),axis=3)
_,_,_,N = kernels.shape
kernels = tf.constant(kernels)
conv_x = tf.nn.conv2d(input_x, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
sess.run(net['input'].assign(image_style))
input_a = sess.run(net[layer])
conv_a = tf.nn.conv2d(input_a, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
M = M_dict[layer[:5]]
G_x = gram_matrix(conv_x,N,M)
G_a = gram_matrix(conv_a,N,M)
style_loss = tf.nn.l2_loss(tf.subtract(G_x,G_a))
style_loss *= ratio* weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
return(total_style_loss)
def loss__HF_many_filters(sess, net, image_style,M_dict):
weight_help_convergence = 10**(9) # This weight comes from a paper by Gatys
# Because the function is pretty flat
total_style_loss = 0
sizeKernel = 3
zeros = np.zeros(shape=(sizeKernel,sizeKernel)).astype('float32')
k = np.array([[ 1, -1, 1],
[-1, 1, -1],
[ 1, -1, 1]]).astype('float32')
#k /= 9.
style_layers_local = [('input',1),('pool1',1),('pool2',1),('pool3',1),('pool4',1)]
length_style_layers = len(style_layers_local)
style_layers_local_size = {'input' : 3,'pool1' : 64,'pool2' : 128,'pool3' : 256,'pool4' : 512}
for layer, weight in style_layers_local:
weight =1.0
input_x = net[layer]
kernels = None
for j in range(style_layers_local_size[layer]):
kernels_tmp = np.zeros(shape=(sizeKernel,sizeKernel,style_layers_local_size[layer])).astype('float32')
kernels_tmp[:,:,j] = k/math.sqrt(sizeKernel*sizeKernel*style_layers_local_size[layer])
kernels_tmp = np.expand_dims(kernels_tmp,axis=3)
if(kernels is None):
kernels = kernels_tmp
else:
kernels = np.concatenate((kernels,kernels_tmp),axis=3)
_,_,_,N = kernels.shape
kernels = tf.constant(kernels)
conv_x = tf.nn.conv2d(input_x, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
sess.run(net['input'].assign(image_style))
input_a = sess.run(net[layer])
conv_a = tf.nn.conv2d(input_a, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
M = M_dict[layer[:5]]
G_x = gram_matrix(conv_x,N,M)
G_a = gram_matrix(conv_a,N,M)
style_loss = tf.nn.l2_loss(tf.subtract(G_x,G_a))
style_loss *= weight * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
if(layer=='input'):
weight2 = 10**(-5)
k2 = np.ones(shape=(sizeKernel,sizeKernel)).astype('float32')
k2 /= 9.
kernels = None
for j in range(style_layers_local_size[layer]):
kernels_tmp = np.zeros(shape=(sizeKernel,sizeKernel,style_layers_local_size[layer])).astype('float32')
kernels_tmp[:,:,j] = k2
kernels_tmp[:,:,(j+1)%(style_layers_local_size[layer])] = -k2
kernels_tmp = np.expand_dims(kernels_tmp,axis=3)
if(kernels is None):
kernels = kernels_tmp
else:
kernels = np.concatenate((kernels,kernels_tmp),axis=3)
_,_,_,N = kernels.shape
kernels = tf.constant(kernels)
conv_x = tf.nn.conv2d(input_x, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
sess.run(net['input'].assign(image_style))
input_a = sess.run(net[layer])
conv_a = tf.nn.conv2d(input_a, kernels, strides=(1, 1, 1, 1),
padding='SAME',name='conv')
M = M_dict[layer[:5]]
G_x = gram_matrix(conv_x,N,M)
G_a = gram_matrix(conv_a,N,M)
style_loss = tf.nn.l2_loss(tf.subtract(G_x,G_a))
style_loss *= weight2 * weight_help_convergence / (2.*(N**2)*length_style_layers)
total_style_loss += style_loss
return(total_style_loss)
def sum_total_variation_losses(sess, net):
"""
Denoising (total variation) loss function; this function comes from:
https://github.com/cysmith/neural-style-tf/blob/master/neural_style.py
"""
x = net['input']
weight_help_convergence = 1.
alpha = 10**(-6) # In order not to collapse the image to a constant
[b, h, w, d] = x.get_shape()
b, h, w, d = tf.to_int32(b),tf.to_int32(h),tf.to_int32(w),tf.to_int32(d)
tv_y_size = tf.to_float(b * (h-1) * w * d) # Number of pixels
tv_x_size = tf.to_float(b * h * (w-1) * d)
loss_y = tf.nn.l2_loss(x[:,1:,:,:] - x[:,:-1,:,:])
loss_y /= tv_y_size
loss_x = tf.nn.l2_loss(x[:,:,1:,:] - x[:,:,:-1,:])
loss_x /= tv_x_size
loss = 2 *alpha * weight_help_convergence * (loss_y + loss_x)
loss = tf.cast(loss, tf.float32)
return(loss)
def sum_total_variation_TF(sess, net):
"""
Another way to obtain the same result as sum_total_variation_losses
https://www.tensorflow.org/api_docs/python/tf/image/total_variation
"""
x = net['input']
weight_help_convergence = 1.
alpha = 10**(-6) # In order not to collapse the image to a constant
loss = weight_help_convergence* alpha * tf.reduce_sum(tf.image.total_variation(x))
return(loss)
def sum_total_variation_losses_norm1(sess, net):
"""
Denoising (total variation, l1 version) loss function; this function comes from:
https://github.com/cysmith/neural-style-tf/blob/master/neural_style.py
"""
x = net['input']
weight_help_convergence = 1.
alpha = 10**(-6) # In order not to collapse the image to a constant
[b, h, w, d] = x.get_shape()
b, h, w, d = tf.to_int32(b),tf.to_int32(h),tf.to_int32(w),tf.to_int32(d)
tv_y_size = tf.to_float(b * (h-1) * w * d) # Number of pixels
tv_x_size = tf.to_float(b * h * (w-1) * d)
loss_y = tf.reduce_sum(tf.abs(x[:,1:,:,:] - x[:,:-1,:,:]))
loss_y /= tv_y_size
loss_x = tf.reduce_sum(tf.abs(x[:,:,1:,:] - x[:,:,:-1,:]))
loss_x /= tv_x_size
loss = 2 *alpha * weight_help_convergence * (loss_y + loss_x)
loss = tf.cast(loss, tf.float32)
return(loss)
def gram_matrix(x,N,M):
"""
Computation of the Gram matrix for one layer; we normalize by the
number of pixels M.
Warning: the way the Gram matrix is computed here differs from the paper
but is equivalent; we use the F matrix with shape M*N, which is quicker
"""
# The implemented version is quicker than this one :
#x = tf.transpose(x,(0,3,1,2))
#F = tf.reshape(x,[tf.to_int32(N),tf.to_int32(M)])
#G = tf.matmul(F,tf.transpose(F))
F = tf.reshape(x,[M,N])
G = tf.matmul(tf.transpose(F),F)
G /= tf.to_float(M)
# That comes from the Control paper
return(G)
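# --- Illustrative NumPy equivalence check (not part of the graph) ---
# The docstring above claims that reshaping to (M, N) and computing F^T F is
# equivalent to the paper's (N, M) formulation F F^T. The sketch below checks
# that claim on a random feature map; it is purely illustrative.
def _example_gram_equivalence():
    h, w, n = 4, 5, 3
    x = np.random.rand(1, h, w, n).astype('float32')
    m = h * w
    f_mn = x.reshape(m, n)                        # (M, N), as used in gram_matrix()
    g_quick = f_mn.T.dot(f_mn) / m
    f_nm = x.transpose(0, 3, 1, 2).reshape(n, m)  # (N, M), as in the paper
    g_paper = f_nm.dot(f_nm.T) / m
    return np.allclose(g_quick, g_paper)          # expected: True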
def get_Gram_matrix(vgg_layers,image_style,pooling_type='avg',padding='SAME'):
"""
Computation of all the Gram matrices of one image using the
vgg_layers
"""
dict_gram = {}
net = net_preloaded(vgg_layers, image_style,pooling_type,padding) # net for the style image
sess = tf.Session()
sess.run(net['input'].assign(image_style))
a = net['input']
_,height,width,N = a.shape
M = height*width
A = gram_matrix(a,tf.to_int32(N),tf.to_int32(M)) # TODO Need to divided by M ????
dict_gram['input'] = sess.run(A)
for layer in VGG19_LAYERS:
a = net[layer]
_,height,width,N = a.shape
M = height*width
A = gram_matrix(a,tf.to_int32(N),tf.to_int32(M)) # TODO Need to divided by M ????
dict_gram[layer] = sess.run(A) # Computation
sess.close()
tf.reset_default_graph() # To clear all operation and variable
return(dict_gram)
def get_features_repr(vgg_layers,image_content,pooling_type='avg',padding='SAME'):
"""
Compute the content image representation values according to the
VGG19 net
"""
net = net_preloaded(vgg_layers, image_content,pooling_type,padding) # net for the content image
sess = tf.Session()
sess.run(net['input'].assign(image_content))
dict_features_repr = {}
for layer in VGG19_LAYERS:
P = sess.run(net[layer])
dict_features_repr[layer] = P # Computation
sess.close()
tf.reset_default_graph() # To clear all operation and variable
return(dict_features_repr)
def get_clip_values(image_style=None,BGR=False):
"""
Return the max and min values used for clipping (and elsewhere) in the
optimization.
If BGR is True then we return 2 vectors of 3 elements (one bound per channel)
"""
if(image_style is None):
# Max and min values from the ImageNet database mean
clip_value_min=-124.
clip_value_max=152.
elif(BGR==False):
clip_value_max = np.max(image_style)
clip_value_min = np.min(image_style)
elif(BGR==True):
clip_value_min = np.min(image_style,axis=(0,1,2)) # along the BGR axis
clip_value_max = np.max(image_style,axis=(0,1,2)) # along the BGR axis
return(clip_value_min,clip_value_max)
def preprocess(img):
"""
This function takes an RGB image and processes it to be used with
tensorflow
"""
# shape (h, w, d) to (1, h, w, d)
img = img[np.newaxis,:,:,:]
# subtract the imagenet mean for a RGB image
img -= np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3)) # In order to have channel = (channel - mean) / std with std = 1
# The input images should be zero-centered by mean pixel (rather than mean image)
# subtraction. Namely, the following BGR values should be subtracted: [103.939, 116.779, 123.68].
# From https://gist.github.com/ksimonyan/3785162f95cd2d5fee77#file-readme-md
try:
img = img[...,::-1] # rgb to bgr # In the Gang weight case the image are RGB
except IndexError:
raise
# Both VGG-16 and VGG-19 were trained using Caffe, and Caffe uses OpenCV to
# load images which uses BGR by default, so both VGG models are expecting BGR images.
# It is the case for the .mat save we are using here.
return(img)
def postprocess(img):
"""
The inverse of the "preprocess" function: from a 4D array back
to an RGB image
"""
# bgr to rgb
img = img[...,::-1]
# add the imagenet mean
img += np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
# shape (1, h, w, d) to (h, w, d)
img = img[0]
img = np.clip(img,0,255).astype('uint8')
return(img)
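# --- Illustrative round-trip sketch (not called in this script) ---
# preprocess() maps an RGB image to the zero-centred BGR 4-D array expected by
# the network, and postprocess() is its (clipped) inverse. The helper below
# only illustrates the intended round trip on a random image.
def _example_pre_post_roundtrip():
    rgb = np.random.randint(0, 256, size=(32, 32, 3)).astype('float32')
    back = postprocess(preprocess(rgb.copy()))  # uint8 RGB image of shape (32, 32, 3)
    return back.shape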
def get_M_dict(image_h,image_w):
"""
This function computes the number of spatial positions (M = h*w) at the
different levels of the convolutional net
"""
M_dict = {'conv1' : 0,'relu1' : 0,'pool1':0,'conv2' : 0,'relu2' : 0,'pool2':0,'conv3' : 0,'relu3' : 0,'pool3':0,'conv4': 0,'relu4' : 0,'pool4':0,'conv5' : 0,'relu5' : 0,'pool5':0}
image_h_tmp = image_h
image_w_tmp = image_w
M = image_h_tmp*image_w_tmp
for key in M_dict.keys():
if(key[:4]=='conv'):
M_dict[key] = M
elif(key[:4]=='pool'):
image_h_tmp = math.ceil(image_h_tmp / 2)
image_w_tmp = math.ceil(image_w_tmp / 2)
M = image_h_tmp*image_w_tmp
M_dict[key] = M
elif(key[:4]=='relu'):
M_dict[key] = M
M_dict['input'] = M_dict['conv1']
return(M_dict)
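# --- Illustrative example (not called in this script) ---
# For a 256x256 input, get_M_dict() stores the number of spatial positions per
# block (assuming the dict preserves insertion order, Python 3.6+):
# 'input'/'conv1'/'relu1' -> 65536, 'pool1' -> 16384, 'pool2' -> 4096, ...
def _example_M_dict():
    return get_M_dict(256, 256)['pool2']  # expected: 64*64 = 4096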
def print_loss_tab(sess,list_loss,list_loss_name):
"""
Function to display the values of the different losses
"""
strToPrint = ''
for loss,loss_name in zip(list_loss,list_loss_name):
loss_tmp = sess.run(loss)
strToPrint += loss_name + ' = {:.2e}, '.format(loss_tmp)
print(strToPrint)
def print_loss(sess,loss_total,content_loss,style_loss):
loss_total_tmp = sess.run(loss_total)
content_loss_tmp = sess.run(content_loss)
style_loss_tmp = sess.run(style_loss)
strToPrint ='Total loss = {:.2e}, Content loss = {:.2e}, Style loss = {:.2e}'.format(loss_total_tmp,content_loss_tmp,style_loss_tmp)
print(strToPrint)
def get_init_noise_img(image_content,init_noise_ratio,range_value):
""" This function return a white noise image for the initialisation
this image can be linearly miwed with the image content with a ratio
"""
_,image_h, image_w, number_of_channels = image_content.shape
low = 127.5 - range_value
high = 127.5 + range_value
noise_img = np.random.uniform(low,high, (image_h, image_w, number_of_channels)).astype('float32')
noise_img = preprocess(noise_img)
if(init_noise_ratio >= 1.):
noise_img = noise_img
elif(init_noise_ratio <= 0.0):
noise_img = image_content
else:
noise_img = init_noise_ratio* noise_img + (1.-init_noise_ratio) * image_content
return(noise_img)
def get_init_noise_img_smooth_grad(image_content):
"""
This function returns a random initial image with a mean close to the
mean value of the content image and with smooth gradients
"""
from skimage import filters
_,image_h, image_w, number_of_channels = image_content.shape
low = -1
high = 1
noise_img = np.random.uniform(low,high, (image_h, image_w, number_of_channels))
gaussian_noise_img = filters.gaussian(noise_img, sigma=2,mode='reflect')
for i in range(3):
gaussian_noise_img[:,:,i] += np.mean(image_content[:,:,i]) # Add the mean of each channel
gaussian_noise_img = np.clip(gaussian_noise_img,0.,255.)
preprocess_img = preprocess(gaussian_noise_img)
return(preprocess_img)
def get_init_noise_img_gaussian(image_content):
"""
Generate an image with Gaussian white noise around the BGR mean of the
content image
"""
b,image_h, image_w, number_of_channels = image_content.shape
noise_img = np.random.randn(b,image_h, image_w, number_of_channels)
bgr_mean = np.mean(image_content,axis=(0,1,2)) # This mean has already been recentered by the ImageNet mean
for i in range(number_of_channels):
noise_img[:,:,:,i] += bgr_mean[i]
# random floats sampled from a univariate “normal” (Gaussian) distribution of mean 0 and variance 1
# Doesn't need preprocess because it is already around 0 with a small range
return(noise_img)
def get_lbfgs_bnds_tf_1_2(init_img,clip_value_min,clip_value_max,BGR=False):
"""
This function creates the bounds for the L-BFGS scipy wrapper, for an
image centered according to the ImageNet mean.
This version is for tensorflow 1.2
"""
dim1,height,width,N = init_img.shape
if(BGR==False):
# Bounds from [0,255] - [124,103] if ImageNet
bnd_inf = clip_value_min*np.ones((dim1,height,width,N)).flatten()
# We need to flatten the array in order to use it in the LBFGS algo
bnd_sup = clip_value_max*np.ones((dim1,height,width,N)).flatten()
bnds = np.stack((bnd_inf, bnd_sup),axis=-1)
else:
# To bound this variable : tf.Variable(np.zeros((1, height, width, numberChannels), dtype=np.float32)) by pair (min,max)
bnd_inf_B = clip_value_min[0]*np.ones((dim1,height,width))
bnd_inf_G = clip_value_min[1]*np.ones((dim1,height,width))
bnd_inf_R = clip_value_min[2]*np.ones((dim1,height,width))
bnd_inf = np.stack((bnd_inf_B, bnd_inf_G,bnd_inf_R),axis=-1)
bnd_inf = bnd_inf.flatten()
bnd_sup_B = clip_value_max[0]*np.ones((dim1,height,width))
bnd_sup_G = clip_value_max[1]*np.ones((dim1,height,width))
bnd_sup_R = clip_value_max[2]*np.ones((dim1,height,width))
bnd_sup = np.stack((bnd_sup_B, bnd_sup_G,bnd_sup_R),axis=-1)
bnd_sup = bnd_sup.flatten()
bnds = np.stack((bnd_inf, bnd_sup),axis=-1)
assert len(bnd_sup) == len(init_img.flatten()) # Check if the dimension is right
assert len(bnd_inf) == len(init_img.flatten())
# Test
x0 = np.asarray(init_img).ravel()
n, = x0.shape
if len(bnds) != n:
print("Erreur a venir car n",n,"len(bnds)",len(bnds))
return(bnds)
def get_lbfgs_bnds(init_img,clip_value_min,clip_value_max,BGR=False):
"""
    This function creates the bounds for the scipy L-BFGS wrapper, for an
    image centered according to the ImageNet mean
    init_img is only used for the dimensions of the image
    clip_value_min and clip_value_max are normally computed on the style image beforehand
    This function returns bounds for TensorFlow >= 1.3 to be used with the scipy
    optimizer
"""
dim1,height,width,N = init_img.shape
if(BGR==False):
# Bounds from [0,255] - [124,103] if ImageNet
bnd_inf = clip_value_min*np.ones((dim1,height,width,N))
# We need to flatten the array in order to use it in the LBFGS algo
bnd_sup = clip_value_max*np.ones((dim1,height,width,N))
bnds = (bnd_inf, bnd_sup)
else:
# To bound this variable : tf.Variable(np.zeros((1, height, width, numberChannels), dtype=np.float32)) by pair (min,max)
bnd_inf_B = clip_value_min[0]*np.ones((dim1,height,width))
bnd_inf_G = clip_value_min[1]*np.ones((dim1,height,width))
bnd_inf_R = clip_value_min[2]*np.ones((dim1,height,width))
bnd_inf = np.stack((bnd_inf_B, bnd_inf_G,bnd_inf_R),axis=-1)
#bnd_inf = np.zeros_like(init_img)
#bnd_inf[:,:,:,0] = bnd_inf_B
#bnd_inf[:,:,:,1] = bnd_inf_G
#bnd_inf[:,:,:,2] = bnd_inf_R
bnd_sup_B = clip_value_max[0]*np.ones((dim1,height,width))
bnd_sup_G = clip_value_max[1]*np.ones((dim1,height,width))
bnd_sup_R = clip_value_max[2]*np.ones((dim1,height,width))
bnd_sup = np.stack((bnd_sup_B, bnd_sup_G,bnd_sup_R),axis=-1)
#bnd_sup = np.zeros_like(init_img)
#bnd_sup[:,:,:,0] = bnd_sup_B
#bnd_sup[:,:,:,1] = bnd_sup_G
#bnd_sup[:,:,:,2] = bnd_sup_R
#print(bnd_sup.shape)
bnds = (bnd_inf, bnd_sup)
return(bnds)
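# Usage sketch (assumption, not in the original script): for TensorFlow >= 1.3
# the (lower, upper) pair is passed per-variable to the scipy wrapper, e.g.
#   bnds = get_lbfgs_bnds(init_img, clip_value_min, clip_value_max, BGR=False)
#   var_to_bounds = {tf.trainable_variables()[0]: bnds}
#   optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss_total, var_to_bounds=var_to_bounds, method='L-BFGS-B')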
def get_Gram_matrix_wrap(args,vgg_layers,image_style,pooling_type='avg',padding='SAME'):
_,image_h_art, image_w_art, _ = image_style.shape
vgg_name = args.vgg_name
stringAdd = ''
if(vgg_name=='normalizedvgg.mat'):
stringAdd = '_n'
elif(vgg_name=='imagenet-vgg-verydeep-19.mat'):
stringAdd = '_v' # Regular one
elif(vgg_name=='random_net.mat'):
stringAdd = '_r' # random
data_style_path = args.data_folder + "gram_"+args.style_img_name+"_"+str(image_h_art)+"_"+str(image_w_art)+"_"+str(pooling_type)+"_"+str(padding)+stringAdd+".pkl"
if(vgg_name=='random_net.mat'):
try:
os.remove(data_style_path)
except:
pass
try:
if(args.verbose): print("Load Data ",data_style_path)
dict_gram = pickle.load(open(data_style_path, 'rb'))
except(FileNotFoundError):
if(args.verbose): print("The Gram Matrices doesn't exist, we will generate them.")
dict_gram = get_Gram_matrix(vgg_layers,image_style,pooling_type,padding)
with open(data_style_path, 'wb') as output_gram_pkl:
pickle.dump(dict_gram,output_gram_pkl)
if(args.verbose): print("Pickle dumped")
return(dict_gram)
def get_features_repr_wrap(args,vgg_layers,image_content,pooling_type='avg',padding='SAME'):
_,image_h, image_w, number_of_channels = image_content.shape
data_content_path = args.data_folder +args.content_img_name+"_"+str(image_h)+"_"+str(image_w)+"_"+str(pooling_type)+"_"+str(padding)+".pkl"
try:
dict_features_repr = pickle.load(open(data_content_path, 'rb'))
except(FileNotFoundError):
if(args.verbose): print("The dictionnary of features representation of content image doesn't exist, we will generate it.")
dict_features_repr = get_features_repr(vgg_layers,image_content,pooling_type,padding)
with open(data_content_path, 'wb') as output_content_pkl:
pickle.dump(dict_features_repr,output_content_pkl)
if(args.verbose): print("Pickle dumped")
return(dict_features_repr)
def plot_image_with_postprocess(args,image,name="",fig=None):
"""
Plot the image using matplotlib
"""
if(fig is None):
fig = plt.figure()
plt.imshow(postprocess(image))
plt.title(name)
if(args.verbose): print("Plot",name)
fig.canvas.flush_events()
time.sleep(10**(-6))
return(fig)
def get_init_img_wrap(args,output_image_path,image_content):
"""
    Function that gathers the different ways to create the initial value
    used as the starting condition of the optimization
"""
if(not(args.start_from_noise)):
try:
init_img = preprocess(scipy.misc.imread(output_image_path).astype('float32'))
if(args.verbose): print("Use the former image")
except(FileNotFoundError):
if(args.verbose): print("Former image not found, use of white noise mixed with the content image as initialization image")
# White noise that we use at the beginning of the optimization
init_img = get_init_noise_img(image_content,args.init_noise_ratio,args.init_range)
elif(args.init =='smooth_grad'):
if(args.verbose): print("Noisy image generation with a smooth gradient")
init_img = get_init_noise_img_smooth_grad(image_content) # TODO add a ratio for this kind of initialization also
elif(args.init=='Gaussian'):
if(args.verbose): print("Noisy image generation with a Gaussian white noise")
init_img = get_init_noise_img_gaussian(image_content)
elif(args.init=='Uniform'):
if(args.verbose): print("Noisy image generation init_noise_ratio = ",args.init_noise_ratio)
init_img = get_init_noise_img(image_content,args.init_noise_ratio,args.init_range)
elif(args.init=='Cst'):
if(args.verbose): print("Constante image")
_,image_h, image_w, number_of_channels = image_content.shape
noise_img = (127.5*np.ones((image_h, image_w, number_of_channels))).astype('float32')
init_img = preprocess(noise_img)
if(args.plot):
plot_image_with_postprocess(args,init_img.copy(),"Initial Image")
return(init_img)
def load_img(args,img_name):
"""
    This function loads the image, converts it to a numpy array and applies
    the preprocessing
"""
image_path = args.img_folder + img_name +args.img_ext
new_img_ext = args.img_ext
try:
img = scipy.misc.imread(image_path) # Float between 0 and 255
except IOError:
if(args.verbose): print("Exception when we try to open the image, try with a different extension format",str(args.img_ext))
if(args.img_ext==".jpg"):
new_img_ext = ".png"
elif(args.img_ext==".png"):
new_img_ext = ".jpg"
try:
image_path = args.img_folder + img_name +new_img_ext # Try the new path
img = scipy.misc.imread(image_path,mode='RGB')
if(args.verbose): print("The image have been sucessfully loaded with a different extension")
except IOError:
if(args.verbose): print("Exception when we try to open the image, we already test the 2 differents extension.")
raise
if(len(img.shape)==2):
if(args.verbose): print("Convert Grey Scale to RGB")
img = gray2rgb(img) # Convertion greyscale to RGB
img = preprocess(img.astype('float32'))
return(img)
def get_losses(args,sess, net, dict_features_repr,M_dict,image_style,dict_gram,pooling_type,padding):
""" Compute the total loss map of the sub loss """
#Get the layer used for style and other
if(args.config_layers=='PoolConfig'):
content_layers = [('conv4_2',1)]
style_layers = [('conv1_1',1),('pool1',1),('pool2',1),('pool3',1),('pool4',1)]
elif(args.config_layers=='FirstConvs'):
content_layers = [('conv4_2',1)]
style_layers = [('conv1_1',1),('conv2_1',1),('conv3_1',1)]
elif(args.config_layers=='Custom'):
content_layers = list(zip(args.content_layers, args.content_layer_weights))
style_layers = list(zip(args.style_layers,args.style_layer_weights))
if(args.verbose): print('content_layers',content_layers)
if(args.verbose): print('style_layers',style_layers)
loss_total = tf.constant(0.)
list_loss = []
list_loss_name = []
assert len(args.loss)
if('Gatys' in args.loss) or ('content' in args.loss) or ('full' in args.loss):
content_loss = args.content_strengh * sum_content_losses(sess, net, dict_features_repr,M_dict,content_layers) # alpha/Beta ratio
list_loss += [content_loss]
list_loss_name += ['content_loss']
if('Gatys' in args.loss) or ('texture' in args.loss) or ('full' in args.loss):
style_loss = sum_style_losses(sess, net, dict_gram,M_dict,style_layers)
list_loss += [style_loss]
list_loss_name += ['style_loss']
if('texMask' in args.loss):
mask_dict = pickle.load(open('mask_dict.pkl', 'rb'))
texture_mask_loss = texture_loss_wt_mask(sess, net, dict_gram,M_dict,mask_dict,style_layers)
list_loss += [texture_mask_loss]
list_loss_name += ['texture_mask_loss']
if('4moments' in args.loss):
style_stats_loss = sum_style_stats_loss(sess,net,image_style,M_dict,style_layers)
list_loss += [style_stats_loss]
list_loss_name += ['style_stats_loss']
if('InterScale' in args.loss) or ('full' in args.loss):
inter_scale_loss = loss_crosscor_inter_scale(sess,net,image_style,M_dict,style_layers,sampling=args.sampling,pooling_type=pooling_type)
list_loss += [inter_scale_loss]
list_loss_name += ['inter_scale_loss']
if('nmoments' in args.loss) or ('full' in args.loss):
loss_n_moments_val = loss_n_stats(sess,net,image_style,M_dict,args.n,style_layers,TypeOfComputation='moments')
list_loss += [loss_n_moments_val]
list_loss_name += ['loss_n_moments_val with (n = '+str(args.n)+')']
if('nmoments_reduce' in args.loss):
loss_n_moments_val = loss_n_stats(sess,net,image_style,M_dict,args.n,style_layers,TypeOfComputation='nmoments_reduce')
list_loss += [loss_n_moments_val]
list_loss_name += ['loss_n_moments_reduce_val with (n = '+str(args.n)+')']
if('Lp' in args.loss) or ('full' in args.loss):
loss_L_p_val = loss_n_stats(sess,net,image_style,M_dict,args.p,style_layers,TypeOfComputation='Lp')
list_loss += [loss_L_p_val]
list_loss_name += ['loss_L_p_val with (p = '+str(args.p)+')']
if('TV' in args.loss) or ('full' in args.loss):
tv_loss = sum_total_variation_losses(sess, net)
list_loss += [tv_loss]
list_loss_name += ['tv_loss']
if('TV1' in args.loss) :
tv_norm1_loss = sum_total_variation_losses_norm1(sess, net)
list_loss += [tv_norm1_loss]
list_loss_name += ['tv_norm1_loss']
if('bizarre' in args.loss) or ('full' in args.loss):
autocorrbizarre_loss = loss_autocorrbizarre(sess,net,image_style,M_dict,style_layers)
list_loss += [autocorrbizarre_loss]
list_loss_name += ['autocorrbizarre_loss']
if('autocorr' in args.loss) or ('full' in args.loss):
autocorr_loss = loss_autocorr(sess,net,image_style,M_dict,style_layers)
list_loss += [autocorr_loss]
list_loss_name += ['autocorr_loss']
if('autocorrLog' in args.loss):
autocorr_lossLog = loss_autocorrLog(sess,net,image_style,M_dict,style_layers)
list_loss += [autocorr_lossLog]
list_loss_name += ['autocorr_lossLog']
if('autocorr_rfft' in args.loss) or ('full' in args.loss):
autocorr_rfft_loss = loss_autocorr_rfft(sess,net,image_style,M_dict,style_layers)
list_loss += [autocorr_rfft_loss]
list_loss_name += ['autocorr_rfft_loss']
if('fft3D' in args.loss) or ('full' in args.loss):
fft3D_loss = loss_fft3D(sess,net,image_style,M_dict,style_layers)
list_loss += [fft3D_loss]
list_loss_name += ['fft3D_loss']
if('fftVect' in args.loss) or ('full' in args.loss):
fftVect_loss = loss_fft_vect(sess,net,image_style,M_dict,style_layers)
list_loss += [fftVect_loss]
list_loss_name += ['fftVect_loss']
if('spectrum' in args.loss) or ('full' in args.loss):
spectrum_loss = loss_spectrum(sess,net,image_style,M_dict)
list_loss += [spectrum_loss]
list_loss_name += ['spectrum_loss']
if('variance' in args.loss) or ('full' in args.loss):
variance_loss = loss_variance(sess,net,image_style,M_dict,style_layers)
list_loss += [variance_loss]
list_loss_name += ['variance_loss']
if('SpectrumOnFeatures' in args.loss) or ('full' in args.loss):
SpectrumOnFeatures_loss = loss_SpectrumOnFeatures(sess,net,image_style,M_dict,style_layers)
list_loss += [SpectrumOnFeatures_loss]
list_loss_name += ['SpectrumOnFeatures_loss']
if('phaseAlea' in args.loss) or ('full' in args.loss):
image_style_Phase = compute_ImagePhaseAlea(sess,net,image_style,M_dict,style_layers)
phaseAlea_loss = loss_PhaseAleatoire(sess,net,image_style,image_style_Phase,M_dict,style_layers)
list_loss += [phaseAlea_loss]
list_loss_name += ['phaseAlea_loss']
if('entropy' in args.loss) or ('full' in args.loss):
loss_entropy_value = loss_entropy(sess,net,image_style,M_dict,style_layers)
list_loss += [loss_entropy_value]
list_loss_name += ['loss_entropy_value']
if('phaseAleaSimple' in args.loss):
image_style_Phase = compute_ImagePhaseAlea(sess,net,image_style,M_dict,style_layers)
phaseAlea_loss = loss_PhaseAleatoireSimple(sess,net,image_style,image_style_Phase,M_dict,style_layers)
list_loss += [phaseAlea_loss]
list_loss_name += ['phaseAlea_loss_simple']
if('phaseAleaList' in args.loss) or ('full' in args.loss):
image_style_Phase = compute_ImagePhaseAlea_list(sess,net,image_style,M_dict,style_layers)
phaseAleaList_loss = loss_PhaseAleatoirelist(sess,net,image_style,image_style_Phase,M_dict,style_layers)
list_loss += [phaseAleaList_loss]
list_loss_name += ['phaseAleaList_loss']
if('intercorr' in args.loss) or ('full' in args.loss):
print("Risk to do a Ressource Exhausted error :) ")
intercorr_loss = loss_intercorr(sess,net,image_style,M_dict,style_layers)
list_loss += [intercorr_loss]
list_loss_name += ['intercorr_loss']
if('current' in args.loss) or ('full' in args.loss):
PhaseImpose_loss = loss_PhaseImpose(sess,net,image_style,M_dict,style_layers)
list_loss += [PhaseImpose_loss]
list_loss_name += ['PhaseImpose_loss']
if('HF' in args.loss) or ('full' in args.loss):
HF_loss = loss__HF_filter(sess, net, image_style,M_dict)
list_loss += [HF_loss]
list_loss_name += ['HF_loss']
if('HFmany' in args.loss):
HFmany_loss = loss__HF_many_filters(sess, net, image_style,M_dict)
list_loss += [HFmany_loss]
list_loss_name += ['HFmany_loss']
if(args.type_of_loss=='add'):
loss_total = tf.reduce_sum(list_loss)
elif(args.type_of_loss=='max'):
loss_total = tf.reduce_max(list_loss)
elif(args.type_of_loss=='mul'):
        # If one of the sub losses is zero the total loss is zero!
        if(args.verbose): print("Mul for the total loss: if one of the sub losses is zero the total loss is zero.")
loss_total = tf.constant(1.)
for loss in list_loss:
loss_total *= (loss*10**(-9))
elif(args.type_of_loss=='Keeney'):
if(args.verbose): print("Keeney for the total loss : they are a lot of different weight everywhere.")
loss_total = tf.constant(1.*10**9)
for loss in list_loss:
loss_total *= (loss*10**(-9) + 1.)
        # Seems to optimize quickly but gets stuck
else:
if(args.verbose): print("The loss aggregation function is not known")
list_loss += [loss_total]
list_loss_name += ['loss_total']
return(loss_total,list_loss,list_loss_name)
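# Illustrative sketch (assumption, not in the original script): with
# args.loss = ['Gatys'] and args.type_of_loss = 'add', get_losses returns
# loss_total = content_loss + style_loss together with the per-term list that
# print_loss_tab uses for logging:
#   loss_total, list_loss, list_loss_name = get_losses(args, sess, net,
#       dict_features_repr, M_dict, image_style, dict_gram, 'avg', 'SAME')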
def style_transfer(args):
"""
    This function is the main core of the program: it needs args in order to
    set everything up and run an optimization that produces an
    image
"""
if args.verbose:
tinit = time.time()
print("verbosity turned on")
print(args)
output_image_path = args.img_output_folder + args.output_img_name + args.img_ext
if(args.verbose and args.img_ext=='.jpg'): print("Be careful you are saving the image in JPEG !")
image_content = load_img(args,args.content_img_name)
image_style = load_img(args,args.style_img_name)
_,image_h, image_w, number_of_channels = image_content.shape
M_dict = get_M_dict(image_h,image_w)
if(args.clipping_type=='ImageNet'):
BGR=False
clip_value_min,clip_value_max = get_clip_values(None,BGR)
elif(args.clipping_type=='ImageStyle'):
BGR=False
clip_value_min,clip_value_max = get_clip_values(image_style,BGR)
elif(args.clipping_type=='ImageStyleBGR'):
BGR = True
clip_value_min,clip_value_max = get_clip_values(image_style,BGR)
if(args.plot):
plt.ion()
plot_image_with_postprocess(args,image_content.copy(),"Content Image")
plot_image_with_postprocess(args,image_style.copy(),"Style Image")
fig = None # initialization for later
    # TODO add something that reshapes the image
t1 = time.time()
pooling_type = args.pooling_type
padding = args.padding
vgg_layers = get_vgg_layers(args.vgg_name)
# Precomputation Phase :
dict_gram = get_Gram_matrix_wrap(args,vgg_layers,image_style,pooling_type,padding)
dict_features_repr = get_features_repr_wrap(args,vgg_layers,image_content,pooling_type,padding)
    net = net_preloaded(vgg_layers, image_content,pooling_type,padding) # The output image has the same size as the content one
t2 = time.time()
if(args.verbose): print("net loaded and gram computation after ",t2-t1," s")
try:
config = tf.ConfigProto()
if(args.gpu_frac <= 0.):
config.gpu_options.allow_growth = True
if args.verbose: print("Memory Growth")
elif(args.gpu_frac <= 1.):
config.gpu_options.per_process_gpu_memory_fraction = args.gpu_frac
if args.verbose: print("Becareful args.gpu_frac = ",args.gpu_frac,"It may cause problem if the value is superior to the available memory place.")
sess = tf.Session(config=config)
init_img = get_init_img_wrap(args,output_image_path,image_content)
loss_total,list_loss,list_loss_name = get_losses(args,sess, net, dict_features_repr,M_dict,image_style,dict_gram,pooling_type,padding)
# Preparation of the assignation operation
placeholder = tf.placeholder(tf.float32, shape=init_img.shape)
placeholder_clip = tf.placeholder(tf.float32, shape=init_img.shape)
assign_op = net['input'].assign(placeholder)
        clip_op = tf.clip_by_value(placeholder_clip,clip_value_min=np.mean(clip_value_min),clip_value_max=np.mean(clip_value_max),name="Clip") # The np.mean is a necessity in the case where we got BGR values TODO : need to change all that
if(args.verbose): print("init loss total")
if(args.optimizer=='adam'): # Gradient Descent with ADAM algo
optimizer = tf.train.AdamOptimizer(args.learning_rate)
elif(args.optimizer=='GD'): # Gradient Descente
            if((args.learning_rate > 1) and (args.verbose)): print("We recommend using a smaller learning rate when using the GD algorithm")
optimizer = tf.train.GradientDescentOptimizer(args.learning_rate)
if((args.optimizer=='GD') or (args.optimizer=='adam')):
train = optimizer.minimize(loss_total)
sess.run(tf.global_variables_initializer())
sess.run(assign_op, {placeholder: init_img})
sess.graph.finalize() # To test if the graph is correct
if(args.verbose): print("sess.graph.finalize()")
t3 = time.time()
if(args.verbose): print("sess Adam initialized after ",t3-t2," s")
# turn on interactive mode
if(args.verbose): print("loss before optimization")
if(args.verbose): print_loss_tab(sess,list_loss,list_loss_name)
for i in range(args.max_iter):
if(i%args.print_iter==0):
if(args.tf_profiler):
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
sess.run(train,options=run_options, run_metadata=run_metadata)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
if(args.verbose): print("Time Line generated")
nameFile = 'timeline'+str(i)+'.json'
with open(nameFile, 'w') as f:
if(args.verbose): print("Save Json tracking")
f.write(ctf)
# Read with chrome://tracing
else:
t3 = time.time()
sess.run(train)
t4 = time.time()
result_img = sess.run(net['input'])
if(args.clip_var==1): # Clipping the variable
cliptensor = sess.run(clip_op,{placeholder_clip: result_img})
sess.run(assign_op, {placeholder: cliptensor})
if(args.verbose): print("Iteration ",i, "after ",t4-t3," s")
if(args.verbose): print_loss_tab(sess,list_loss,list_loss_name)
if(args.plot): fig = plot_image_with_postprocess(args,result_img,"Intermediate Image",fig)
result_img_postproc = postprocess(result_img)
scipy.misc.toimage(result_img_postproc).save(output_image_path)
else:
# Just training
sess.run(train)
if(args.clip_var==1): # Clipping the variable
result_img = sess.run(net['input'])
cliptensor = sess.run(clip_op,{placeholder_clip: result_img})
sess.run(assign_op, {placeholder: cliptensor})
elif(args.optimizer=='lbfgs'):
            # TODO : be able to detect if print_iter > max_iter and deal with it
nb_iter = args.max_iter // args.print_iter
max_iterations_local = args.max_iter // nb_iter
if(args.verbose): print("Start LBFGS optim with a print each ",max_iterations_local," iterations")
optimizer_kwargs = {'maxiter': max_iterations_local,'maxcor': args.maxcor}
# To solve the non retro compatibility of Tensorflow !
if(tf.__version__ >= '1.3'):
bnds = get_lbfgs_bnds(init_img,clip_value_min,clip_value_max,BGR)
trainable_variables = tf.trainable_variables()[0]
var_to_bounds = {trainable_variables: bnds}
optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss_total,var_to_bounds=var_to_bounds,
method='L-BFGS-B',options=optimizer_kwargs)
else:
bnds = get_lbfgs_bnds_tf_1_2(init_img,clip_value_min,clip_value_max,BGR)
optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss_total,bounds=bnds,
method='L-BFGS-B',options=optimizer_kwargs)
sess.run(tf.global_variables_initializer())
sess.run(assign_op, {placeholder: init_img})
sess.graph.finalize() # To test if the graph is correct
if(args.verbose): print("sess.graph.finalize()")
if(args.verbose): print("loss before optimization")
if(args.verbose): print_loss_tab(sess,list_loss,list_loss_name)
for i in range(nb_iter):
t3 = time.time()
optimizer.minimize(sess)
t4 = time.time()
if(args.verbose): print("Iteration ",i, "after ",t4-t3," s")
if(args.verbose): print_loss_tab(sess,list_loss,list_loss_name)
result_img = sess.run(net['input'])
if(args.plot): fig = plot_image_with_postprocess(args,result_img.copy(),"Intermediate Image",fig)
result_img_postproc = postprocess(result_img)
scipy.misc.imsave(output_image_path,result_img_postproc)
# The last iterations are not made
# The End : save the resulting image
result_img = sess.run(net['input'])
if(args.plot): plot_image_with_postprocess(args,result_img.copy(),"Final Image",fig)
result_img_postproc = postprocess(result_img)
scipy.misc.toimage(result_img_postproc).save(output_image_path)
if args.HistoMatching:
# Histogram Matching
if(args.verbose): print("Histogram Matching before saving")
result_img_postproc = Misc.histogram_matching(result_img_postproc, postprocess(image_style))
output_image_path_hist = args.img_output_folder + args.output_img_name+'_hist' +args.img_ext
scipy.misc.toimage(result_img_postproc).save(output_image_path_hist)
except:
if(args.verbose): print("Error, in the lbfgs case the image can be strange and incorrect")
result_img = sess.run(net['input'])
result_img_postproc = postprocess(result_img)
output_image_path_error = args.img_output_folder + args.output_img_name+'_error' +args.img_ext
scipy.misc.toimage(result_img_postproc).save(output_image_path_error)
# In the case of the lbfgs optimizer we only get the init_img if we did not do a check point before
raise
finally:
sess.close()
if(args.verbose):
print("Close Sess")
tend = time.time()
print("Computation total for ",tend-tinit," s")
if(args.plot): input("Press enter to end and close all")
def main():
#global args
parser = get_parser_args()
args = parser.parse_args()
style_transfer(args)
def main_with_option():
parser = get_parser_args()
image_style_name= "StarryNight_Big"
image_style_name= "StarryNight"
starry = "StarryNight"
marbre = 'GrungeMarbled0021_S'
tile = "TilesOrnate0158_1_S"
tile2 = "TilesZellige0099_1_S"
peddle = "pebbles"
brick = "BrickSmallBrown0293_1_S"
D ="D20_01"
orange = "orange"
bleu = "bleu"
glass = "glass"
damier ='DamierBig_Proces'
camouflage = 'Camouflage0003_S'
#img_output_folder = "images/"
image_style_name = tile
content_img_name = tile
max_iter = 2000
print_iter = 200
start_from_noise = 1 # True
    init_noise_ratio = 1.0 # TODO add Gaussian noise on the image instead of uniform noise
content_strengh = 0.001
optimizer = 'lbfgs'
learning_rate = 10 # 10 for adam and 10**(-10) for GD
maxcor = 10
sampling = 'up'
# In order to set the parameter before run the script
parser.set_defaults(style_img_name=image_style_name,max_iter=max_iter,
print_iter=print_iter,start_from_noise=start_from_noise,
content_img_name=content_img_name,init_noise_ratio=init_noise_ratio,
content_strengh=content_strengh,optimizer=optimizer,maxcor=maxcor,
learning_rate=learning_rate,sampling=sampling)
args = parser.parse_args()
style_transfer(args)
if __name__ == '__main__':
main_with_option()
# Use CUDA_VISIBLE_DEVICES='' python ... to avoid using CUDA
# To update Tensorflow : python3.6 -m pip install --upgrade tensorflow-gpu
|
gpl-3.0
|
iarroyof/nlp-pipeline
|
mkl_regressor.py
|
1
|
10487
|
from modshogun import *
from numpy import *
from sklearn.metrics import r2_score
from scipy.stats import randint
from scipy import stats
from scipy.stats import randint as sp_randint
from scipy.stats import expon
import sys, os
import Gnuplot, Gnuplot.funcutils
class mkl_regressor():
""" This is a Multiple Kernel Learning (MKL) for sklearn (scikit-learn) Python library. This MKL object is only for
regression for now. One can instantiate this object within CrossValidation, GridSearch or RandomizedSearch objects
for sklearn model selection. The MKL implementation used in this object is that from Shogun Machine learning tool.
There are some issues regarding to the selection of kernel bandwidths. They are randomly generated without any control
for now, so any contribution is welcome.
"""
def __init__(self, widths = None, kernel_weights = None, svm_c = 0.01, mkl_c = 1.0, svm_norm = 1, mkl_norm = 1, degree = 2,
median_width = None, width_scale = None, min_size=2, max_size = 10, kernel_size = None):
self.svm_c = svm_c
self.mkl_c = mkl_c
self.svm_norm = svm_norm
self.mkl_norm = mkl_norm
self.degree = degree
self.widths = widths
self.kernel_weights = kernel_weights
self.median_width = median_width
self.width_scale = width_scale
self.min_size = min_size
self.max_size = max_size
self.kernel_size = kernel_size
def combine_kernel(self):
self.__kernels = CombinedKernel()
for width in self.widths:
kernel = GaussianKernel()
kernel.set_width(width)
kernel.init(self.__feats_train, self.__feats_train)
self.__kernels.append_kernel(kernel)
del kernel
if self.degree > 0:
kernel = PolyKernel(10, self.degree)
kernel.init(self.__feats_train, self.__feats_train)
self.__kernels.append_kernel(kernel)
del kernel
self.__kernels.init(self.__feats_train, self.__feats_train)
def fit(self, X, y, **params):
for parameter, value in params.items():
setattr(self, parameter, value)
labels_train = RegressionLabels(y.reshape((len(y), )))
self.__feats_train = RealFeatures(X.T)
self.combine_kernel()
binary_svm_solver = SVRLight() # seems to be optional, with LibSVR it does not work.
self.__mkl = MKLRegression(binary_svm_solver)
self.__mkl.set_C(self.svm_c, self.svm_c)
self.__mkl.set_C_mkl(self.mkl_c)
self.__mkl.set_mkl_norm(self.mkl_norm)
self.__mkl.set_mkl_block_norm(self.svm_norm)
self.__mkl.set_kernel(self.__kernels)
self.__mkl.set_labels(labels_train)
try:
self.__mkl.train()
except SystemError as inst:
if "Assertion" in str(inst):
sys.stderr.write("""WARNING: Bad parameter combination: [svm_c %f mkl_c %f mkl_norm %f svm_norm %f, degree %d] \n widths %s \n
MKL error [%s]""" % (self.svm_c, self.mkl_c, self.mkl_norm, self.svm_norm, self.degree, self.widths, str(inst)))
pass
self.kernel_weights = self.__kernels.get_subkernel_weights()
self.kernel_size = len(self.kernel_weights)
self.__loaded = False
def predict(self, X):
self.__feats_test = RealFeatures(X.T)
ft = None
if not self.__loaded:
            self.__kernels.init(self.__feats_train, self.__feats_test) # init the combined kernel between train and test features
self.__mkl.set_kernel(self.__kernels)
else:
ft = CombinedFeatures()
for i in xrange(self.__mkl.get_kernel().get_num_subkernels()):
ft.append_feature_obj(self.__feats_test)
return self.__mkl.apply_regression(ft).get_labels()
def set_params(self, **params):
for parameter, value in params.items():
setattr(self, parameter, value)
if self.median_width: # If widths are specified, the specified median has priority, so widths will be automatically overwritten.
self.set_param_weights()
return self
def get_params(self, deep=False):
return {param: getattr(self, param) for param in dir(self) if not param.startswith('__') and not '__' in param and not callable(getattr(self,param))}
def score(self, X_t, y_t):
predicted = self.predict(X_t)
return r2_score(predicted, y_t)
def serialize_model (self, file_name, sl="save"):
from os.path import basename, dirname
from bz2 import BZ2File
import pickle
if sl == "save": mode = "wb"
elif sl == "load": mode = "rb"
else: sys.stderr.write("Bad option. Only 'save' and 'load' are available.")
f = BZ2File(file_name + ".bin", mode)
if not f:
sys.stderr.write("Error serializing kernel matrix.")
exit()
if sl == "save":
#self.feats_train.save_serializable(fstream)
#os.unlink(file_name)
pickle.dump(self.__mkl, f, protocol=2)
elif sl == "load":
#self.feats_train = RealFeatures()
#self.feats_train.load_serializable(fstream)
mkl = self.__mkl = pickle.load(f)
self.__loaded = True
else: sys.stderr.write("Bad option. Only 'save' and 'load' are available.")
def save(self, file_name = None):
""" Python reimplementated function for saving a pretrained MKL machine.
This method saves a trained MKL machine to the file 'file_name'. If not 'file_name' is given, a
dictionary 'mkl_machine' containing parameters of the given trained MKL object is returned.
Here we assumed all subkernels of the passed CombinedKernel are of the same family, so uniquely the
first kernel is used for verifying if the passed 'kernel' is a Gaussian mixture. If it is so, we insert
the 'widths' to the model dictionary 'mkl_machine'. An error is returned otherwise.
"""
self._support = []
self._num_support_vectors = self.__mkl.get_num_support_vectors()
self._bias = self.__mkl.get_bias()
for i in xrange(self._num_support_vectors):
self._support.append((self.__mkl.get_alpha(i), self.__mkl.get_support_vector(i)))
self._kernel_family = self.__kernels.get_first_kernel().get_name()
if file_name:
with open(file_name,'w') as f:
f.write(str(self.get_params())+'\n')
self.serialize_model(file_name, "save")
else:
return self.get_params()
def load(self, file_name):
""" This method receives a 'file.model' file name (if it is not in pwd, full path must be given). The loaded file
must contain at least a dictionary at its top. This dictionary must contain keys from which model
parameters will be read (including weights, C, etc.). For example:
{'bias': value, 'param_1': value,...,'support_vectors': [(idx, value),(idx, value)], param_n: value}
        The MKL model is tuned to the parameters stored in the given file. Another file with a double extension must
        accompany the model file: '*file.model.bin', where the kernel matrix is encoded together with the kernel
        machine.
"""
# Load machine parameters
with open(file_name, 'r') as pointer:
mkl_machine = eval(pointer.read())
# Set loaded parameters
for parameter, value in mkl_machine.items():
setattr(self, parameter, value)
# Load the machine itself
self.serialize_model(file_name, "load") # Instantiates the loaded MKL.
return self
def set_param_weights(self):
"""Gives a vector of weights which distribution is linear. The 'median_width' value is used as location parameter and
the 'width_scale' as for scaling parameter of the returned weights range. If not size of the output vector is given,
a random size between 'min_size' and 'max_size' is returned."""
assert self.median_width and self.width_scale and self.kernel_size # Width generation needed parameters
self.minimun_width_scale = 0.01
self.widths = linspace(start = self.median_width*self.minimun_width_scale,
stop = self.median_width*self.width_scale,
num = self.kernel_size)
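# Minimal usage sketch (assumption, not part of the original module); X and y
# are numpy arrays of shape (n_samples, n_features) and (n_samples,):
#   regressor = mkl_regressor(widths=[0.5, 5.0, 50.0], svm_c=1.0, mkl_c=1.0, degree=2)
#   regressor.fit(X, y)
#   predictions = regressor.predict(X)
#   print regressor.score(X, y)   # R^2 between predictions and targets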
class expon_vector(stats.rv_continuous):
def __init__(self, loc = 1.0, scale = None, min_size=2, max_size = 10, size = None):
self.loc = loc
self.scale = scale
self.min_size = min_size
self.max_size = max_size
self.size = size
def rvs(self):
if not self.size:
self.size = randint.rvs(low = self.min_size, high = self.max_size, size = 1)
if self.scale:
return expon.rvs(loc = self.loc * 0.09, scale = self.scale, size = self.size)
else:
return expon.rvs(loc = self.loc * 0.09, scale = self.loc * 8.0, size = self.size)
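# Usage sketch (assumption, not part of the original module): draw a random
# vector of Gaussian kernel widths, e.g. to feed RandomizedSearchCV as the
# 'widths' parameter distribution.
#   width_sampler = expon_vector(loc=30.0, min_size=3, max_size=8)
#   widths = width_sampler.rvs()   # array of 3 to 7 positive widths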
def test_predict(data, machine = None, model_file=None, labels = None, out_file = None, graph = False):
g = Gnuplot.Gnuplot()
if type(machine) is str:
        assert model_file # Given a machine name, a model file for loading is necessary.
if "mkl_regerssion" == machine:
machine_ = mkl_regressor()
machine_.load(model_file)
# elif other machine types ...
else:
print "Error machine type"
exit()
# elif other machine types ...
else:
machine_ = machine
preds = machine_.predict(data)
    if labels is not None:
        r2 = r2_score(preds, labels)
    else:
        r2 = None # no ground truth given, so no R^2 can be reported
        pred = preds; real = range(len(pred))
output = {}
output['learned_model'] = out_file
output['estimated_output'] = preds
output['best_params'] = machine_.get_params()
output['performance'] = r2
if out_file:
with open(out_file, "a") as f:
f.write(str(output)+'\n')
if graph:
print "R^2: ", r2
pred, real = zip(*sorted(zip(preds, labels), key=lambda tup: tup[1]))
print "Machine Parameters: ", machine_.get_params()
g.plot(Gnuplot.Data(pred, with_="lines"), Gnuplot.Data(real, with_="linesp") )
return output
|
gpl-3.0
|
stuarteberg/lazyflow
|
lazyflow/classifiers/sklearnLazyflowClassifier.py
|
2
|
3912
|
import cPickle as pickle
import numpy
from lazyflowClassifier import LazyflowVectorwiseClassifierABC, LazyflowVectorwiseClassifierFactoryABC
import logging
logger = logging.getLogger(__name__)
class SklearnLazyflowClassifierFactory(LazyflowVectorwiseClassifierFactoryABC):
"""
A factory for creating and training sklearn classifiers.
"""
VERSION = 1 # This is used to determine compatibility of pickled classifier factories.
# You must bump this if any instance members are added/removed/renamed.
def __init__(self, classifier_type, *args, **kwargs):
"""
classifier_type: The sklearn class to instantiate, e.g. sklearn.ensemble.RandomForestClassifier
args, kwargs: Passed on to the classifier constructor.
"""
self._args = args
self._kwargs = kwargs
self._classifier_type = classifier_type
def create_and_train(self, X, y, feature_names=None):
X = numpy.asarray(X, numpy.float32)
y = numpy.asarray(y, numpy.uint32)
assert X.ndim == 2
assert len(X) == len(y)
sklearn_classifier = self._classifier_type(*self._args, **self._kwargs)
logger.debug( 'Training new sklearn classifier: {}'.format( type(sklearn_classifier).__name__ ) )
sklearn_classifier.fit(X, y)
try:
# Save for future reference
known_classes = sklearn_classifier.classes_
except AttributeError:
# Some sklearn classifiers don't have a 'classes_' attribute.
known_classes = numpy.unique(y)
return SklearnLazyflowClassifier( sklearn_classifier, known_classes, X.shape[1], feature_names )
@property
def description(self):
return self._classifier_type.__name__
def __eq__(self, other):
return ( isinstance(other, type(self))
and self._classifier_type == other._classifier_type
and self._args == other._args
and self._kwargs == other._kwargs )
def __ne__(self, other):
return not self.__eq__(other)
assert issubclass( SklearnLazyflowClassifierFactory, LazyflowVectorwiseClassifierFactoryABC )
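# Usage sketch (assumption, not part of the original module):
#   from sklearn.ensemble import RandomForestClassifier
#   factory = SklearnLazyflowClassifierFactory(RandomForestClassifier, n_estimators=100)
#   classifier = factory.create_and_train(X, y)   # X: (n_samples, n_features), y: integer labels
#   probabilities = classifier.predict_probabilities(X)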
class SklearnLazyflowClassifier(LazyflowVectorwiseClassifierABC):
VERSION = 2 # Used for pickling compatibility
class VersionIncompatibilityError(Exception):
pass
def __init__(self, sklearn_classifier, known_classes, feature_count, feature_names):
self._sklearn_classifier = sklearn_classifier
self._known_classes = known_classes
self._feature_count = feature_count
self._feature_names = feature_names
self.VERSION = SklearnLazyflowClassifier.VERSION
def predict_probabilities(self, X):
logger.debug( 'Predicting with sklearn classifier: {}'.format( type(self._sklearn_classifier).__name__ ) )
return self._sklearn_classifier.predict_proba( numpy.asarray(X, dtype=numpy.float32) )
@property
def known_classes(self):
return self._known_classes
@property
def feature_count(self):
return self._feature_count
@property
def feature_names(self):
return self._feature_names
def serialize_hdf5(self, h5py_group):
h5py_group['pickled_classifier'] = pickle.dumps( self )
# This is a required field for all classifiers
h5py_group['pickled_type'] = pickle.dumps( type(self) )
@classmethod
def deserialize_hdf5(cls, h5py_group):
pickled = h5py_group['pickled_classifier'][()]
classifier = pickle.loads( pickled )
if not hasattr(classifier, "VERSION") or classifier.VERSION != cls.VERSION:
raise cls.VersionIncompatibilityError("Version mismatch. Deserialized classifier version does not match this code base.")
return classifier
assert issubclass( SklearnLazyflowClassifier, LazyflowVectorwiseClassifierABC )
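# Serialization sketch (assumption, not part of the original module):
#   import h5py
#   with h5py.File('classifier.h5', 'w') as f:
#       classifier.serialize_hdf5(f.create_group('classifier'))
#   with h5py.File('classifier.h5', 'r') as f:
#       restored = SklearnLazyflowClassifier.deserialize_hdf5(f['classifier'])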
|
lgpl-3.0
|
GuessWhoSamFoo/pandas
|
pandas/core/dtypes/missing.py
|
1
|
15114
|
"""
missing types & inference
"""
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._libs.tslibs import NaT, iNaT
from .common import (
_NS_DTYPE, _TD_DTYPE, ensure_object, is_bool_dtype, is_complex_dtype,
is_datetime64_dtype, is_datetime64tz_dtype, is_datetimelike,
is_datetimelike_v_numeric, is_dtype_equal, is_extension_array_dtype,
is_float_dtype, is_integer_dtype, is_object_dtype, is_period_dtype,
is_scalar, is_string_dtype, is_string_like_dtype, is_timedelta64_dtype,
needs_i8_conversion, pandas_dtype)
from .generic import (
ABCDatetimeArray, ABCExtensionArray, ABCGeneric, ABCIndexClass,
ABCMultiIndex, ABCSeries, ABCTimedeltaArray)
from .inference import is_list_like
isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
isnull = isna
def _isna_new(obj):
if is_scalar(obj):
return libmissing.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass,
ABCExtensionArray,
ABCDatetimeArray, ABCTimedeltaArray)):
return _isna_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=isna))
elif isinstance(obj, list):
return _isna_ndarraylike(np.asarray(obj, dtype=object))
elif hasattr(obj, '__array__'):
return _isna_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, '__array__'):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isna = _isna_new
def _use_inf_as_na(key):
"""Option change callback for na/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
from pandas.core.config import get_option
flag = get_option(key)
if flag:
globals()['_isna'] = _isna_old
else:
globals()['_isna'] = _isna_new
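# Usage sketch (assumption, not part of this module): the switch is driven by
# the pandas option system, which invokes this callback:
#   >>> import pandas as pd
#   >>> pd.set_option('mode.use_inf_as_na', True)   # isna/notna now treat +/-inf as missing
#   >>> pd.set_option('mode.use_inf_as_na', False)  # restore the default behaviour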
def _isna_ndarraylike(obj):
is_extension = is_extension_array_dtype(obj)
if not is_extension:
# Avoid accessing `.values` on things like
# PeriodIndex, which may be expensive.
values = getattr(obj, 'values', obj)
else:
values = obj
dtype = values.dtype
if is_extension:
if isinstance(obj, (ABCIndexClass, ABCSeries)):
values = obj._values
else:
values = obj
result = values.isna()
elif isinstance(obj, ABCDatetimeArray):
return obj.isna()
elif is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
# object array of strings
result = np.zeros(values.shape, dtype=bool)
else:
# object array of non-strings
result = np.empty(shape, dtype=bool)
vec = libmissing.isnaobj(values.ravel())
result[...] = vec.reshape(shape)
elif needs_i8_conversion(dtype):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isna_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = libmissing.isnaobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif is_datetime64_dtype(dtype):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notna(obj):
"""
Detect non-missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are valid (not missing, which is ``NaN`` in numeric
arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : array-like or object value
Object to check for *not* null or *non*-missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is valid.
See Also
--------
isna : Boolean inverse of pandas.notna.
Series.notna : Detect valid values in a Series.
DataFrame.notna : Detect valid values in a DataFrame.
Index.notna : Detect valid values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.notna('dog')
True
>>> pd.notna(np.nan)
False
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.notna(array)
array([[ True, False, True],
[ True, True, False]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.notna(index)
array([ True, True, False, True])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.notna(df)
0 1 2
0 True True True
1 True False True
>>> pd.notna(df[1])
0 True
1 False
Name: 1, dtype: bool
"""
res = isna(obj)
if is_scalar(res):
return not res
return ~res
notnull = notna
def _isna_compat(arr, fill_value=np.nan):
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
dtype = arr.dtype
if isna(fill_value):
return not (is_bool_dtype(dtype) or
is_integer_dtype(dtype))
return True
def array_equivalent(left, right, strict_nan=False):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
# Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
if is_string_dtype(left) or is_string_dtype(right):
if not strict_nan:
# isna considers NaN and None to be equivalent.
return lib.array_equivalent_object(
ensure_object(left.ravel()), ensure_object(right.ravel()))
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if (not isinstance(right_value, float) or
not np.isnan(right_value)):
return False
else:
if left_value != right_value:
return False
return True
# NaNs can occur in float and complex arrays.
if is_float_dtype(left) or is_complex_dtype(left):
# empty
if not (np.prod(left.shape) and np.prod(right.shape)):
return True
return ((left == right) | (isna(left) & isna(right))).all()
    # numpy will not allow this type of datetimelike vs integer comparison
elif is_datetimelike_v_numeric(left, right):
return False
# M8/m8
elif needs_i8_conversion(left) and needs_i8_conversion(right):
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view('i8')
right = right.view('i8')
# if we have structured dtypes, compare first
if (left.dtype.type is np.void or
right.dtype.type is np.void):
if left.dtype != right.dtype:
return False
return np.array_equal(left, right)
def _infer_fill_value(val):
"""
infer the fill value for the nan/NaT from the provided
scalar/ndarray/list-like if we are a NaT, return the correct dtyped
element to provide proper block construction
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if is_datetimelike(val):
return np.array('NaT', dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(ensure_object(val), skipna=False)
if dtype in ['datetime', 'datetime64']:
return np.array('NaT', dtype=_NS_DTYPE)
elif dtype in ['timedelta', 'timedelta64']:
return np.array('NaT', dtype=_TD_DTYPE)
return np.nan
def _maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if _isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr
def na_value_for_dtype(dtype, compat=True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : boolean, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT
"""
dtype = pandas_dtype(dtype)
if is_extension_array_dtype(dtype):
return dtype.na_value
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype) or is_period_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
return False
return np.nan
def remove_na_arraylike(arr):
"""
Return array-like containing only true/non-NaN values, possibly empty.
"""
if is_extension_array_dtype(arr):
return arr[notna(arr)]
else:
return arr[notna(lib.values_from_object(arr))]
|
bsd-3-clause
|
elinebakker/paparazzi
|
sw/tools/calibration/calibration_utils.py
|
27
|
12769
|
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
"""Returns available ac_id from a log."""
f = open(filename, 'r')
ids = []
pattern = re.compile("\S+ (\S+)")
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
ac_id = m.group(1)
if not ac_id in ids:
ids.append(ac_id)
return ids
def read_log(ac_id, filename, sensor):
"""Extracts raw sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
"""Extracts scaled sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_SCALED (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
if (float(m.group(1)) >= float(t_start)) and (float(m.group(1)) < (float(t_end)+1.0)):
list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
"""Extracts raw magnetometer and current measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
"""Select only non-noisy data."""
filtered_meas = []
filtered_idx = []
for i in range(window_size, len(meas)-window_size):
noise = meas[i-window_size:i+window_size, :].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i, :])
filtered_idx.append(i)
return np.array(filtered_meas), filtered_idx
def get_min_max_guess(meas, scale):
"""Initial boundary based calibration."""
max_meas = meas[:, :].max(axis=0)
min_meas = meas[:, :].min(axis=0)
range = max_meas - min_meas
# check if we would get division by zero
if range.all():
n = (max_meas + min_meas) / 2
sf = 2*scale/range
return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
else:
return np.array([0, 0, 0, 0])
def scale_measurements(meas, p):
"""Scale the set of measurements."""
l_comp = []
l_norm = []
for m in meas[:, ]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return np.array(l_comp), np.array(l_norm)
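# Illustrative calibration sketch (assumption, not part of the original tool):
#   meas = read_log('42', 'flight.log', 'ACCEL')                  # raw accelerometer triples
#   flt_meas, flt_idx = filter_meas(meas, window_size=10, noise_threshold=40)
#   p0 = get_min_max_guess(flt_meas, scale=9.81)                  # neutral + sensitivity guess
#   scaled, norms = scale_measurements(flt_meas, p0)              # norms should cluster near 9.81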
def estimate_mag_current_relation(meas):
"""Calculate linear coefficient of magnetometer-current relation."""
coefficient = []
for i in range(0, 3):
gradient, intercept, r_value, p_value, std_err = stats.linregress(meas[:, 3], meas[:, i])
coefficient.append(gradient)
return coefficient
def print_xml(p, sensor, res):
"""Print xml for airframe file."""
print("")
print("<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>")
print("<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>")
print("<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>")
print("<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>")
print("")
def print_imu_scaled(sensor, measurements, attrs):
print("")
print(sensor+" : Time Range("+str(measurements[:,0].min(axis=0))+" : "+str(measurements[:,0].max(axis=0))+")")
np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
print("Min " + str(measurements[:,1:].min(axis=0)*attrs[0]) + " " + attrs[1])
print("Max " + str(measurements[:,1:].max(axis=0)*attrs[0]) + " " + attrs[1])
print("Mean " + str(measurements[:,1:].mean(axis=0)*attrs[0]) + " " + attrs[1])
print("StDev " + str(measurements[:,1:].std(axis=0)*attrs[0]) + " " + attrs[1])
def plot_measurements(sensor, measurements):
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.ylabel('ADC')
plt.title("Raw %s measurements" % sensor)
plt.show()
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
"""Plot calibration results."""
# plot raw measurements with filtered ones marked as red circles
plt.subplot(3, 1, 1)
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.plot(flt_idx, flt_meas[:, 0], 'ro')
plt.plot(flt_idx, flt_meas[:, 1], 'ro')
plt.plot(flt_idx, flt_meas[:, 2], 'ro')
plt.ylabel('ADC')
plt.title('Raw '+sensor+', red dots are actually used measurements')
plt.tight_layout()
# show scaled measurements with initial guess
plt.subplot(3, 2, 3)
plt.plot(cp0[:, 0])
plt.plot(cp0[:, 1])
plt.plot(cp0[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (initial guess)')
plt.xticks([])
plt.subplot(3, 2, 4)
plt.plot(np0)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (initial guess)')
plt.xticks([])
# show scaled measurements after optimization
plt.subplot(3, 2, 5)
plt.plot(cp1[:, 0])
plt.plot(cp1[:, 1])
plt.plot(cp1[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (optimized)')
plt.xticks([])
plt.subplot(3, 2, 6)
plt.plot(np1)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (optimized)')
plt.xticks([])
# if we want to have another plot we only draw the figure (non-blocking)
    # also in matplotlib before 1.0.0 only one call to show is possible
if blocking:
plt.show()
else:
plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
"""Plot imu scaled results."""
plt.figure("Sensor Scaled")
plt.subplot(4, 1, 1)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
#plt.xlabel('Time (s)')
plt.ylabel(attrs[1])
plt.title(sensor)
plt.subplot(4, 1, 2)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[2])
plt.subplot(4, 1, 3)
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[3])
plt.subplot(4, 1, 4)
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
plt.xlabel('Time (s)')
plt.ylabel(attrs[4])
plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
"""Plot imu scaled fft results."""
#dt = 0.0769
#Fs = 1/dt
Fs = 26.0
plt.figure("Sensor Scaled - FFT")
plt.subplot(3, 1, 1)
plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[2])
plt.title(sensor)
plt.subplot(3, 1, 2)
plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[3])
plt.subplot(3, 1, 3)
plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
plt.xlabel('Frequency')
plt.ylabel(attrs[4])
plt.show()
def plot_mag_3d(measured, calibrated, p):
"""Plot magnetometer measurements on 3D sphere."""
# set up points for sphere and ellipsoid wireframes
u = np.r_[0:2 * np.pi:20j]
v = np.r_[0:np.pi:20j]
wx = np.outer(cos(u), sin(v))
wy = np.outer(sin(u), sin(v))
wz = np.outer(np.ones(np.size(u)), cos(v))
ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
# measurements
mx = measured[:, 0]
my = measured[:, 1]
mz = measured[:, 2]
# calibrated values
cx = calibrated[:, 0]
cy = calibrated[:, 1]
cz = calibrated[:, 2]
# axes size
left = 0.02
bottom = 0.05
width = 0.46
height = 0.9
rect_l = [left, bottom, width, height]
rect_r = [left/2+0.5, bottom, width, height]
fig = plt.figure(figsize=plt.figaspect(0.5))
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_l)
else:
ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
# plot measurements
ax.scatter(mx, my, mz)
plt.hold(True)
# plot line from center to ellipsoid center
ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
# plot ellipsoid
ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
# add the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.set_title('MAG raw with fitted ellipsoid and center offset')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_r)
else:
ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
plt.hold(True)
ax.scatter(cx, cy, cz)
ax.set_title('MAG calibrated on unit sphere')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
""" Read a turntable log.
    return an array whose first column is the turntable reading and the next three are raw gyro readings
"""
f = open(filename, 'r')
    pattern_g = re.compile(r"(\S+) "+str(ac_id)+r" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
    pattern_t = re.compile(r"(\S+) "+str(tt_id)+r" IMU_TURNTABLE (\S+)")
last_tt = None
list_tt = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern_t, line)
if m:
last_tt = float(m.group(2))
m = re.match(pattern_g, line)
if m and last_tt and _min < last_tt < _max:
list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_tt)
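# Hedged usage note (added for illustration; the filename and ids below are
# hypothetical). The returned array can be inspected or plotted directly, e.g.:
#   tt = read_turntable_log(ac_id=42, tt_id=43, filename="turntable.log", _min=0.1, _max=10.0)
#   plt.plot(tt[:, 0], tt[:, 1]); plt.show()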
|
gpl-2.0
|
trevstanhope/row-assist
|
tests/camera.py
|
1
|
1916
|
import cv, cv2
from matplotlib import pyplot as plot
import numpy as np
CAMERA_INDEX = 0
HUE_MIN = 10
HUE_MAX = 100
PIXEL_WIDTH = 640
PIXEL_HEIGHT = 480
THRESHOLD_PERCENTILE = 90
camera = cv2.VideoCapture(CAMERA_INDEX)
camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, PIXEL_WIDTH)
camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, PIXEL_HEIGHT)
camera.set(cv.CV_CAP_PROP_SATURATION, 1.0)
camera.set(cv.CV_CAP_PROP_BRIGHTNESS, 0.5)
camera.set(cv.CV_CAP_PROP_CONTRAST, 0.5)
camera.set(cv.CV_CAP_PROP_GAIN, 0.5)
while True:
try:
(s, bgr) = camera.read()
#bgr = np.rot90(bgr)
if s:
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
hue_min = HUE_MIN
hue_max = HUE_MAX
sat_min = np.percentile(hsv[:,:,1], 5)
sat_max = 255
val_min = np.percentile(hsv[:,:,2], 50)
val_max = 255
threshold_min = np.array([hue_min, sat_min, val_min], np.uint8)
threshold_max = np.array([hue_max, sat_max, val_max], np.uint8)
mask = cv2.inRange(hsv, threshold_min, threshold_max)
#mask = cv2.erode(mask, np.ones((3,3), np.uint8), iterations =2)
#mask = cv2.dilate(mask, np.ones((3,3), np.uint8), iterations =1)
column_sum = mask.sum(axis=0) # vertical summation
centroid = int(np.argmax(column_sum))
egi = np.dstack((mask, mask, mask))
bgr[:,centroid-1:centroid+1,:] = 0
egi[:,centroid-1:centroid+1,:] = 255
hsv[:,centroid-1:centroid+1,:] = 255
bgr[:,320,:] = 255
egi[:,320,:] = 255
hsv[:,320,:] = 255
output = np.hstack((bgr, egi))
cv2.imshow('', output)
if cv2.waitKey(5) == 27:
pass
print(centroid)
except Exception as error:
print('ERROR: %s' % str(error))
break
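# Hedged illustration (added note): the detection above reduces a binary mask to a
# single column index via a vertical sum. On a synthetic mask it behaves like:
#   mask = np.zeros((480, 640), np.uint8); mask[:, 200:210] = 255
#   column_sum = mask.sum(axis=0)          # vertical summation
#   centroid = int(np.argmax(column_sum))  # -> 200, left edge of the lit band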
|
mit
|
adamryczkowski/powtarzane-cwiczenia
|
dyskryminator.py
|
1
|
8615
|
#!/bin/usr/python3
'''A class that can act as a real-time discriminator.
'''
from scipy.stats.kde import gaussian_kde  # for smoothing histograms
import numpy as np
import pandas as pd
from konfiguracja import *
import math
def cached_gaussian_kde(fn):
    '''fn must be an object produced by gaussian_kde'''
minmax = (fn.dataset.min(), fn.dataset.max())
    width = (minmax[1]-minmax[0])*1.2  # add a 20% margin
mid = (minmax[0]+minmax[1])/2
minmax = (mid - width/2, mid + width/2)
Nsteps = 100
step = width/Nsteps
val = np.zeros(Nsteps+1)
i=0
for x in np.linspace(minmax[0], minmax[1], num=Nsteps+1):
val[i]= fn(x)
i=i+1
# print(i),
def ans(x):
if x < minmax[0]:
return(0)
if x > minmax[1]:
return(0)
i = (x - minmax[0])/step
imin = math.floor(i)
ipart = i - imin
xmin = val[imin]
xmax = val[imin+1]
return(xmin * (1-ipart) + xmax * ipart)
print('.')
return(ans)
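# Hedged usage sketch (added for illustration; never called by this module):
# cached_gaussian_kde tabulates a gaussian_kde on a fixed grid and returns a cheap
# linear-interpolation lookup, which is what the real-time path relies on.
def _example_cached_kde():
    samples = np.random.normal(0.0, 1.0, 500)
    fast_pdf = cached_gaussian_kde(gaussian_kde(samples))
    return fast_pdf(0.0)  # roughly the standard normal density at 0 (~0.4)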
def logLikelihood(self, histogram, record):
    '''Return the log-likelihood of the record, i.e. the sum of per-feature log-likelihoods'''
ans = 0
if len(record) != len(histogram):
raise ArithmeticError
for i in range(0, len(histogram)):
val = histogram[i](record[i])
if val == 0:
val = -30
else:
val = math.log(val)
ans = ans + val
return(ans)
class LikelihoodsCalc:
def __init__(self, senseFn, senseNames, generator):
        '''A class that computes the per-feature log-likelihoods of the data'''
self.senseFn = senseFn
self.senseNames = senseNames
self.generator = generator
self.histograms = None
def CreateHistograms(self,csvpath=None):
        '''Build the histograms from scratch'''
bd = pd.DataFrame(columns=self.senseNames,dtype=float)
# ar = []
for chunk in self.generator:
cechy = self.senseFn(chunk)
# ar.append(cechy)
bd.loc[bd.shape[0]] = cechy
if csvpath != None:
bd.to_csv(csvpath, index=False)
dyscr = []
for col in bd.columns:
dyscr.append(cached_gaussian_kde(gaussian_kde(bd[col])))
self.histograms = dyscr
def LoglikelihoodFromRecord(self, record):
ans = 0
if len(record) != len(self.senseNames):
raise ArithmeticError
for i in range(0, len(self.senseNames)):
val = self.histograms[i](record[i])
if val == 0:
val = -30
else:
val = math.log(val)
ans = ans + val
return(ans)
class LikelihoodCalc:
def __init__(self, generator):
        '''A class that computes a single (scalar) log-likelihood'''
self.generator = generator
self.histograms = None
def CreateHistograms(self, csvpath=None):
        '''Build the histogram from scratch'''
        bd1 = pd.DataFrame(columns=['Sampl'], dtype=int)
        bd2 = pd.DataFrame(columns=['LogLik'], dtype=float)
bd=pd.concat([bd1,bd2],1)
i = 0
for val in self.generator:
bd.loc[i] = [i,val]
i=i+1
if csvpath!=None:
bd.to_csv(csvpath, index=False)
c=bd[bd.columns[1]]
d=gaussian_kde(c)
self.histogram = cached_gaussian_kde(d)
def LoglikelihoodFromVal(self, val):
ans = self.histogram(val)
if ans == 0:
ans = -30
else:
ans = math.log(ans)
return(ans)
def LikelihoodFromVal(self, val):
ans = self.histogram(val)
return(ans)
class Klasyfikator:
def __init__(self, senseFn, senseNames):
        '''senseFn is a function that, for a given data element, returns a vector of numbers describing its features.
        The number of features must be the same for every data element.
        senseNames is a vector of names for those features (for debugging and reports)'''
self.senseFn = senseFn
self.senseNames = senseNames
def TrainMe(self, negativeGenerator, positiveGenerator, altnegativeGenerator, altpositiveGenerator):
def dodajSenseFn(generator, hist):
for chunk in generator:
record = self.senseFn(chunk)
val = hist.LoglikelihoodFromRecord(record)
yield(val)
self.histNeg = LikelihoodsCalc(self.senseFn, self.senseNames, negativeGenerator)
self.histNeg.CreateHistograms()
negGeneratorAlt = dodajSenseFn(altnegativeGenerator, self.histNeg)
self.logLikNeg = LikelihoodCalc(negGeneratorAlt)
self.logLikNeg.CreateHistograms()
self.histPos = LikelihoodsCalc(self.senseFn, self.senseNames, positiveGenerator)
self.histPos.CreateHistograms()
posGeneratorAlt = dodajSenseFn(altpositiveGenerator, self.histPos)
self.logLikPos = LikelihoodCalc(posGeneratorAlt)
self.logLikPos.CreateHistograms()
def GetPosterior(self, chunk):
rec = self.senseFn(chunk)
valNeg = self.histNeg.LoglikelihoodFromRecord(rec)
valNeg = self.logLikNeg.LikelihoodFromVal(valNeg)
valPos = self.histPos.LoglikelihoodFromRecord(rec)
        valPos = self.logLikPos.LikelihoodFromVal(valPos)  # fixed: was valNeg (copy-paste bug)
if valPos + valNeg == 0:
print("Coś nie tak!!")
return(0)
else:
return(valPos / (valPos + valNeg))
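# Hedged usage sketch (added for illustration; the feature function and generators
# below are synthetic stand-ins, and this helper is never called by the module):
def _example_klasyfikator_usage():
    def sense(chunk):
        return [float(np.mean(chunk)), float(np.std(chunk))]
    def gen(loc):
        for _ in range(200):
            yield np.random.normal(loc, 1.0, 64)
    clf = Klasyfikator(sense, ['mean', 'std'])
    clf.TrainMe(gen(0.0), gen(5.0), gen(0.0), gen(5.0))
    return clf.GetPosterior(np.random.normal(5.0, 1.0, 64))  # should lean towards 1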
def Main():
import pyaudio
import wave
CHUNK_SIZE=1024
FORMAT = pyaudio.paInt16
SAMPLING_RATE = 44100
    OFFSET = 13  # shift the signal by this many samples each pass to get a bootstrap-like effect
    MIN_VOICE_FREQ = 86  # in Hz
CHUNK_FREQ = int(SAMPLING_RATE // MIN_VOICE_FREQ // 2)
def ZrobFFT(sygnalChunk):
spectr = np.abs(np.fft.fft(sygnalChunk))
return(spectr[1:CHUNK_FREQ+1])
def WczytajSygnal(path):
sig=wave.open(path,'rb')
length=sig.getnframes()
wholed=sig.readframes(length)
wholend=np.frombuffer(buffer=wholed,dtype=np.int16)
return(wholend)
def fromWav(wavpath, SAMPLING_RATE = 44100, OFFSET = 13, MIN_VOICE_FREQ = 86):
        '''A generator of signal chunks'''
CHUNK_FREQ = int(SAMPLING_RATE // MIN_VOICE_FREQ // 2)
wholend=WczytajSygnal(wavpath)
a = wholend
for i in range(0, CHUNK_SIZE//OFFSET):
for j in range(0, len(wholend), CHUNK_SIZE):
if j + CHUNK_SIZE < len(a):
yield a[j:(j+CHUNK_SIZE)]
a = np.roll(a, OFFSET)
def nazwy(SAMPLING_RATE = 44100, MIN_VOICE_FREQ = 86):
CHUNK_FREQ = int(SAMPLING_RATE // MIN_VOICE_FREQ // 2)
colnames = np.zeros(CHUNK_FREQ,dtype='a9')
for i in range(1, CHUNK_FREQ + 1):
colnames[i-1] = "{0:.1f}".format(SAMPLING_RATE / (i * 2)) + "Hz"
return(colnames)
class SoundInputStream:
# class Buffer:
    #    '''a class that would store successive analyzed fragments'''
def __init__(self, CHUNK_SIZE=CHUNK_SIZE):
self.CHUNK_SIZE=CHUNK_SIZE
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=FORMAT,
channels=1,
rate=SAMPLING_RATE,
input=True,
frames_per_buffer=CHUNK_SIZE)
# self.workers=Pool()
def __del__(self):
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def GetFrame(self):
            '''Return the next recorded frame as a numpy int16 array'''
chunk=self.stream.read(self.CHUNK_SIZE)
chunknp=np.frombuffer(buffer=chunk,dtype=np.int16)
return(chunknp)
cls = Klasyfikator(ZrobFFT, nazwy())
negGen = fromWav(path_silence_sample)
negGen2 = fromWav(path_silence_sample)
posGen = fromWav(path_voice_sample)
posGen2 = fromWav(path_voice_sample)
cls.TrainMe(negGen, posGen, negGen2, posGen2)
def SprawdzaczDzwieku(posterioriDicriminator):
spr=SoundInputStream()
while True:
chunk=spr.GetFrame()
print(posterioriDicriminator.GetPosterior(chunk))
SprawdzaczDzwieku(cls)
if __name__ == '__main__':
Main()
|
gpl-2.0
|
dmlc/xgboost
|
tests/benchmark/benchmark_linear.py
|
1
|
2916
|
#pylint: skip-file
import argparse
import xgboost as xgb
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import time
import ast
rng = np.random.RandomState(1994)
def run_benchmark(args):
try:
dtest = xgb.DMatrix('dtest.dm')
dtrain = xgb.DMatrix('dtrain.dm')
if not (dtest.num_col() == args.columns \
and dtrain.num_col() == args.columns):
raise ValueError("Wrong cols")
if not (dtest.num_row() == args.rows * args.test_size \
and dtrain.num_row() == args.rows * (1-args.test_size)):
raise ValueError("Wrong rows")
except:
print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns))
print("{}/{} test/train split".format(args.test_size, 1.0 - args.test_size))
tmp = time.time()
X, y = make_classification(args.rows, n_features=args.columns, n_redundant=0, n_informative=args.columns, n_repeated=0, random_state=7)
if args.sparsity < 1.0:
X = np.array([[np.nan if rng.uniform(0, 1) < args.sparsity else x for x in x_row] for x_row in X])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=7)
print ("Generate Time: %s seconds" % (str(time.time() - tmp)))
tmp = time.time()
print ("DMatrix Start")
dtrain = xgb.DMatrix(X_train, y_train)
dtest = xgb.DMatrix(X_test, y_test, nthread=-1)
print ("DMatrix Time: %s seconds" % (str(time.time() - tmp)))
dtest.save_binary('dtest.dm')
dtrain.save_binary('dtrain.dm')
param = {'objective': 'binary:logistic','booster':'gblinear'}
    if args.params != '':
param.update(ast.literal_eval(args.params))
param['updater'] = args.updater
print("Training with '%s'" % param['updater'])
tmp = time.time()
xgb.train(param, dtrain, args.iterations, evals=[(dtrain,"train")], early_stopping_rounds = args.columns)
print ("Train Time: %s seconds" % (str(time.time() - tmp)))
parser = argparse.ArgumentParser()
parser.add_argument('--updater', default='coord_descent')
parser.add_argument('--sparsity', type=float, default=0.0)
parser.add_argument('--lambda', type=float, default=1.0)
parser.add_argument('--tol', type=float, default=1e-5)
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--rows', type=int, default=1000000)
parser.add_argument('--iterations', type=int, default=10000)
parser.add_argument('--columns', type=int, default=50)
parser.add_argument('--test_size', type=float, default=0.25)
parser.add_argument('--standardise', type=bool, default=False)
parser.add_argument('--params', default='', help='Provide additional parameters as a Python dict string, e.g. --params \"{\'max_depth\':2}\"')
args = parser.parse_args()
run_benchmark(args)
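# Hedged usage note (added for illustration; the flags are the ones defined above,
# the values are arbitrary):
#   python benchmark_linear.py --rows 100000 --columns 50 --sparsity 0.2 --updater coord_descent --params "{'alpha': 0.1}"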
|
apache-2.0
|
DonBeo/statsmodels
|
statsmodels/examples/ex_generic_mle_t.py
|
29
|
10826
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
from __future__ import print_function
import numpy as np
from scipy import stats, special
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
class MyT(GenericLikelihoodModel):
    '''Maximum Likelihood Estimation of a linear model with t-distributed errors
    This is an example for generic MLE; the docstring layout follows
    discretemod.Poisson, but the statistical model here is a t regression.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
        Negative loglikelihood of the linear model with t-distributed errors
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
        The negative log likelihood of each observation, evaluated at `params`
Notes
--------
        .. math :: -\\ln L_{i}=-\\ln\\Gamma\\left(\\tfrac{\\nu+1}{2}\\right)+\\ln\\Gamma\\left(\\tfrac{\\nu}{2}\\right)+\\tfrac{1}{2}\\ln\\left(\\nu\\pi\\right)+\\tfrac{\\nu+1}{2}\\ln\\left(1+\\tfrac{\\left(y_{i}-x_{i}^{\\prime}\\beta\\right)^{2}}{\\nu\\sigma^{2}}\\right)+\\ln\\sigma
"""
#print len(params),
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
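# Hedged check (added for illustration, not part of the original example): the
# per-observation negative loglikelihood above should agree with scipy's
# t-distribution logpdf. This helper is hypothetical and is never called below.
def _check_nloglikeobs_against_scipy(model, params):
    beta, df, scale = params[:-2], params[-2], params[-1]
    loc = np.dot(model.exog, beta)
    return np.allclose(-model.nloglikeobs(params),
                       stats.t.logpdf(model.endog, df, loc=loc, scale=scale))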
#Example:
np.random.seed(98765678)
nobs = 1000
rvs = np.random.randn(nobs,5)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(5, size=nobs)
#print data_endog
modp = MyT(data_endog, data_exog)
modp.start_value = np.ones(data_exog.shape[1]+2)
modp.start_value[-2] = 10
modp.start_params = modp.start_value
resp = modp.fit(start_params = modp.start_value)
print(resp.params)
print(resp.bse)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(modp.start_value, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_value)
print(tmp.shape)
'''
>>> tmp = modp.loglike(modp.start_value)
8
>>> tmp.shape
(100,)
>>> tmp.sum(0)
-24220.877108016182
>>> tmp = modp.nloglikeobs(modp.start_value)
8
>>> tmp.shape
(100, 100)
>>> np.dot(modp.exog, beta).shape
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'beta' is not defined
>>> params = modp.start_value
>>> beta = params[:-2]
>>> beta.shape
(6,)
>>> np.dot(modp.exog, beta).shape
(100,)
>>> modp.endog.shape
(100, 100)
>>> xbeta.shape
(100,)
>>>
'''
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
repr(start_params) array([ 1., 1., 1., 1., 1., 1., 1., 1.])
Optimization terminated successfully.
Current function value: 91.897859
Iterations: 108
Function evaluations: 173
Gradient evaluations: 173
[ 1.58253308e-01 1.73188603e-01 1.77357447e-01 2.06707494e-02
-1.31174789e-01 8.79915580e-01 6.47663840e+03 6.73457641e+02]
[ NaN NaN NaN NaN NaN
28.26906182 NaN NaN]
()
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>> resp.bse
array([ NaN, NaN, NaN, NaN,
NaN, 28.26906182, NaN, NaN])
>>> resp.jac
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'GenericLikelihoodModelResults' object has no attribute 'jac'
>>> resp.bsejac
array([ 45243.35919908, 51997.80776897, 41418.33021984,
42763.46575168, 50101.91631612, 42804.92083525,
3005625.35649203, 13826948.68708931])
>>> resp.bsejhj
array([ 1.51643931, 0.80229636, 0.27720185, 0.4711138 , 0.9028682 ,
0.31673747, 0.00524426, 0.69729368])
>>> resp.covjac
array([[ 2.04696155e+09, 1.46643494e+08, 7.59932781e+06,
-2.39993397e+08, 5.62644255e+08, 2.34300598e+08,
-3.07824799e+09, -1.93425470e+10],
[ 1.46643494e+08, 2.70377201e+09, 1.06005712e+08,
3.76824011e+08, -1.21778986e+08, 5.38612723e+08,
-2.12575784e+10, -1.69503271e+11],
[ 7.59932781e+06, 1.06005712e+08, 1.71547808e+09,
-5.94451158e+07, -1.44586401e+08, -5.41830441e+06,
1.25899515e+10, 1.06372065e+11],
[ -2.39993397e+08, 3.76824011e+08, -5.94451158e+07,
1.82871400e+09, -5.66930891e+08, 3.75061111e+08,
-6.84681772e+09, -7.29993789e+10],
[ 5.62644255e+08, -1.21778986e+08, -1.44586401e+08,
-5.66930891e+08, 2.51020202e+09, -4.67886982e+08,
1.78890380e+10, 1.75428694e+11],
[ 2.34300598e+08, 5.38612723e+08, -5.41830441e+06,
3.75061111e+08, -4.67886982e+08, 1.83226125e+09,
-1.27484996e+10, -1.12550321e+11],
[ -3.07824799e+09, -2.12575784e+10, 1.25899515e+10,
-6.84681772e+09, 1.78890380e+10, -1.27484996e+10,
9.03378378e+12, 2.15188047e+13],
[ -1.93425470e+10, -1.69503271e+11, 1.06372065e+11,
-7.29993789e+10, 1.75428694e+11, -1.12550321e+11,
2.15188047e+13, 1.91184510e+14]])
>>> hb
array([[ 33.68732564, -2.33209221, -13.51255321, -1.60840159,
-13.03920385, -9.3506543 , 4.86239173, -9.30409101],
[ -2.33209221, 3.12512611, -6.08530968, -6.79232244,
3.66804898, 1.26497071, 5.10113409, -2.53482995],
[ -13.51255321, -6.08530968, 31.14883498, -5.01514705,
-10.48819911, -2.62533035, 3.82241581, -12.51046342],
[ -1.60840159, -6.79232244, -5.01514705, 28.40141917,
-8.72489636, -8.82449456, 5.47584023, -18.20500017],
[ -13.03920385, 3.66804898, -10.48819911, -8.72489636,
9.03650914, 3.65206176, 6.55926726, -1.8233635 ],
[ -9.3506543 , 1.26497071, -2.62533035, -8.82449456,
3.65206176, 21.41825348, -1.28610793, 4.28101146],
[ 4.86239173, 5.10113409, 3.82241581, 5.47584023,
6.55926726, -1.28610793, 46.52354448, -32.23861427],
[ -9.30409101, -2.53482995, -12.51046342, -18.20500017,
-1.8233635 , 4.28101146, -32.23861427, 178.61978279]])
>>> np.linalg.eigh(hb)
(array([ -10.50373649, 0.7460258 , 14.73131793, 29.72453087,
36.24103832, 41.98042979, 48.99815223, 190.04303734]), array([[-0.40303259, 0.10181305, 0.18164206, 0.48201456, 0.03916688,
0.00903695, 0.74620692, 0.05853619],
[-0.3201713 , -0.88444855, -0.19867642, 0.02828812, 0.16733946,
-0.21440765, -0.02927317, 0.01176904],
[-0.41847094, 0.00170161, 0.04973298, 0.43276118, -0.55894304,
0.26454728, -0.49745582, 0.07251685],
[-0.3508729 , -0.08302723, 0.25004884, -0.73495077, -0.38936448,
0.20677082, 0.24464779, 0.11448238],
[-0.62065653, 0.44662675, -0.37388565, -0.19453047, 0.29084735,
-0.34151809, -0.19088978, 0.00342713],
[-0.15119802, -0.01099165, 0.84377273, 0.00554863, 0.37332324,
-0.17917015, -0.30371283, -0.03635211],
[ 0.15813581, 0.0293601 , 0.09882271, 0.03515962, -0.48768565,
-0.81960996, 0.05248464, 0.22533642],
[-0.06118044, -0.00549223, 0.03205047, -0.01782649, -0.21128588,
-0.14391393, 0.05973658, -0.96226835]]))
>>> np.linalg.eigh(np.linalg.inv(hb))
(array([-0.09520422, 0.00526197, 0.02040893, 0.02382062, 0.02759303,
0.03364225, 0.06788259, 1.34043621]), array([[-0.40303259, 0.05853619, 0.74620692, -0.00903695, -0.03916688,
0.48201456, 0.18164206, 0.10181305],
[-0.3201713 , 0.01176904, -0.02927317, 0.21440765, -0.16733946,
0.02828812, -0.19867642, -0.88444855],
[-0.41847094, 0.07251685, -0.49745582, -0.26454728, 0.55894304,
0.43276118, 0.04973298, 0.00170161],
[-0.3508729 , 0.11448238, 0.24464779, -0.20677082, 0.38936448,
-0.73495077, 0.25004884, -0.08302723],
[-0.62065653, 0.00342713, -0.19088978, 0.34151809, -0.29084735,
-0.19453047, -0.37388565, 0.44662675],
[-0.15119802, -0.03635211, -0.30371283, 0.17917015, -0.37332324,
0.00554863, 0.84377273, -0.01099165],
[ 0.15813581, 0.22533642, 0.05248464, 0.81960996, 0.48768565,
0.03515962, 0.09882271, 0.0293601 ],
[-0.06118044, -0.96226835, 0.05973658, 0.14391393, 0.21128588,
-0.01782649, 0.03205047, -0.00549223]]))
>>> np.diag(np.linalg.inv(hb))
array([ 0.01991288, 1.0433882 , 0.00516616, 0.02642799, 0.24732871,
0.05281555, 0.02236704, 0.00643486])
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 0.14111302, 1.02146375, 0.07187597, 0.16256686, 0.49732154,
0.22981633, 0.14955616, 0.08021756])
>>> hess = modp.hessian(resp.params)
>>> np.sqrt(np.diag(np.linalg.inv(hess)))
array([ 231.3823423 , 117.79508218, 31.46595143, 53.44753106,
132.4855704 , NaN, 5.47881705, 90.75332693])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-4)
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 31.93524822, 22.0333515 , NaN, 29.90198792,
38.82615785, NaN, NaN, NaN])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-8)
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 423, in inv
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 306, in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>>
'''
|
bsd-3-clause
|
raghavrv/scikit-learn
|
benchmarks/bench_glmnet.py
|
111
|
3890
|
"""
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
coufon/neon-distributed
|
examples/conv_autoencoder.py
|
3
|
2945
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolutional autoencoder example network for MNIST data set
"""
import numpy as np
from neon.data import ArrayIterator, load_mnist
from neon.initializers import Uniform
from neon.layers import Conv, Pooling, GeneralizedCost, Deconv
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# Load dataset
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
# Set input and target to X_train
train = ArrayIterator(X_train, lshape=(1, 28, 28))
# Initialize the weights and the learning rule
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9)
# Define the layers
layers = [Conv((4, 4, 8), init=init_uni, activation=Rectlin()),
Pooling(2),
Conv((4, 4, 32), init=init_uni, activation=Rectlin()),
Pooling(2),
Deconv(fshape=(4, 4, 8), init=init_uni, activation=Rectlin()),
Deconv(fshape=(3, 3, 8), init=init_uni, activation=Rectlin(), strides=2),
Deconv(fshape=(2, 2, 1), init=init_uni, strides=2, padding=1)]
# Define the cost
cost = GeneralizedCost(costfunc=SumSquared())
model = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
# Fit the model
model.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
# Plot the reconstructed digits
try:
from matplotlib import pyplot, cm
fi = 0
nrows = 10
ncols = 12
test = np.zeros((28*nrows, 28*ncols))
idxs = [(row, col) for row in range(nrows) for col in range(ncols)]
for row, col in idxs:
im = model.layers.layers[-1].outputs.get()[:, fi].reshape((28, 28))
test[28*row:28*(row+1):, 28*col:28*(col+1)] = im
fi = fi + 1
pyplot.matshow(test, cmap=cm.gray)
pyplot.savefig('Reconstructed.png')
except ImportError:
    print('matplotlib needs to be manually installed to generate plots')
|
apache-2.0
|
glauffer/final_project
|
notebooks/plot_histograma.py
|
1
|
2272
|
import numpy as np
import matplotlib.pyplot as plt
dados = np.array(np.loadtxt('/home/glauffer/Dropbox/FURG/final_project/data/rrab_ti_tf_pontos.txt'))
# Histogram of t_i
fig,(ax,ax2) = plt.subplots(1, 2, sharey=True)
a, b, c = ax.hist(dados.T[0], bins=90)
ax2.hist(dados.T[0], bins=90)
ax.set_xlim(0,1000) # most of the data
ax2.set_xlim(2000,2500) # outliers only
# hide the spines between ax and ax2
ax.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax.yaxis.tick_left()
ax.tick_params(labeltop='off') # don't put tick labels at the top
ax2.yaxis.tick_right()
# Make the spacing between the two axes a bit smaller
plt.subplots_adjust(wspace=0.15)
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot(), just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((1-d,1+d),(-d,+d), **kwargs) # top-left diagonal
ax.plot((1-d,1+d),(1-d,1+d), **kwargs) # bottom-left diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d,d),(-d,+d), **kwargs) # top-right diagonal
ax2.plot((-d,d),(1-d,1+d), **kwargs) # bottom-right diagonal
#plt.title('Data Juliana $t_i$ RRab, max = %s'%b[np.argmax(a)])
#plt.suptitle('HJD $t_i$ max = %s'%b[np.argmax(a)], fontsize = 18)
plt.suptitle('Histograma - $t_i$', fontsize = 18)
#ax.set_ylabel('Frequência', fontsize = 14)
#ax2.set_xlabel('Tempo Inicial')
fig.text(0.5, 0.0, 'Tempo Inicial [JD]', ha='center', fontsize = 14)
fig.text(0.0, 0.5, 'Frequência', va='center', rotation='vertical', fontsize = 14)
print('t_i maximo = %s'%b[np.argmax(a)])
plt.savefig('hist_ti.png', dpi=200)
plt.clf()
# Histogram of t_f
a, b, c = plt.hist(dados.T[1], bins=300)
plt.xlim(4300, 4600)
plt.suptitle('Histograma - $t_f$', fontsize=18)
plt.xlabel('Tempo Final [JD]', fontsize=14)
plt.ylabel('Frequência', fontsize=14)
plt.savefig('hist_tf.png', dpi=200)
print('t_f maximo = %s'%b[np.argmax(a)])
plt.clf()
# Histogram of n
a, b, c = plt.hist(dados.T[2], bins=140)
plt.xlim(0,1400)
plt.title('Histograma - $n$', fontsize=18)
plt.xlabel('Quantidade de pontos', fontsize=14)
plt.ylabel('Frequência', fontsize=14)
b[np.argmax(a)]
plt.savefig('hist_n.png', dpi=200)
print('n maximo = %s'%b[np.argmax(a)])
|
apache-2.0
|
sosey/ginga
|
ginga/mplw/transform.py
|
6
|
10341
|
#
# transform.py -- a custom projection for supporting matplotlib plotting
# on ginga
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
# NOTE: this code is based on "custom_projection_example.py", an example
# script developed by matplotlib developers
# See http://matplotlib.org/examples/api/custom_projection_example.html
#
from __future__ import print_function
import matplotlib
from matplotlib.axes import Axes
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, BboxTransformTo, Transform, \
blended_transform_factory
from matplotlib.projections import register_projection
import numpy as np
from ginga.util.six.moves import map, zip
class GingaAxes(Axes):
"""
This is a custom matplotlib projection to support matplotlib plotting
on a ginga-rendered image in a matplotlib Figure.
This code is based on 'custom_projection_example.py', an example
script developed by matplotlib developers.
"""
    # The projection must specify a name. This will be used by the
# user to select the projection, i.e. ``subplot(111,
# projection='ginga')``.
name = 'ginga'
def __init__(self, *args, **kwargs):
# this is the Ginga object
self.viewer = kwargs.pop('viewer', None)
Axes.__init__(self, *args, **kwargs)
## self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def set_viewer(self, viewer):
self.viewer = viewer
self.transData.viewer = viewer
def _set_lim_and_transforms(self):
"""
This is called once when the plot is created to set up all the
transforms for the data, text and grids.
"""
# There are three important coordinate spaces going on here:
#
# 1. Data space: The space of the data itself
#
# 2. Axes space: The unit rectangle (0, 0) to (1, 1)
# covering the entire plot area.
#
# 3. Display space: The coordinates of the resulting image,
# often in pixels or dpi/inch.
# This function makes heavy use of the Transform classes in
# ``lib/matplotlib/transforms.py.`` For more information, see
# the inline documentation there.
# The goal of the first two transformations is to get from the
# data space to axes space. It is separated into a non-affine
# and affine part so that the non-affine part does not have to be
# recomputed when a simple affine change to the figure has been
# made (such as resizing the window or changing the dpi).
# 3) This is the transformation from axes space to display
# space.
self.transAxes = BboxTransformTo(self.bbox)
# Now put these 3 transforms together -- from data all the way
# to display coordinates. Using the '+' operator, these
# transforms will be applied "in order". The transforms are
# automatically simplified, if possible, by the underlying
# transformation framework.
#self.transData = \
# self.transProjection + self.transAffine + self.transAxes
self.transData = self.GingaTransform()
self.transData.viewer = self.viewer
# self._xaxis_transform = blended_transform_factory(
# self.transData, self.transAxes)
# self._yaxis_transform = blended_transform_factory(
# self.transAxes, self.transData)
self._xaxis_transform = self.transData
self._yaxis_transform = self.transData
# Prevent the user from applying scales to one or both of the
# axes. In this particular case, scaling the axes wouldn't make
# sense, so we don't allow it.
def set_xscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_xscale(self, *args, **kwargs)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
Axes.set_yscale(self, *args, **kwargs)
# Prevent the user from changing the axes limits. This also
# applies to interactive panning and zooming in the GUI interfaces.
## def set_xlim(self, *args, **kwargs):
## print "Setting xlim!", args
## def set_ylim(self, *args, **kwargs):
## print "Setting ylim!", args
def format_coord(self, x, y):
"""
Override this method to change how the values are displayed in
the status bar.
"""
return 'x=%f, y=%f' % (x, y)
def get_data_ratio(self):
"""
Return the aspect ratio of the data itself.
This method should be overridden by any Axes that have a
fixed data ratio.
"""
return 1.0
def can_zoom(self):
"""
        Return True if this axes supports the zoom box
"""
# TODO: get zoom box working
return False
def can_pan(self):
"""
        Return True if this axes supports panning
"""
return True
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
bd = self.viewer.get_bindings()
data_x, data_y = self.viewer.get_data_xy(x, y)
bd.ms_pan(self.viewer, 'down', data_x, data_y)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
bd = self.viewer.get_bindings()
data_x, data_y = self.viewer.get_last_data_xy()
bd.ms_pan(self.viewer, 'up', data_x, data_y)
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
bd = self.viewer.get_bindings()
data_x, data_y = self.viewer.get_data_xy(x, y)
bd.ms_pan(self.viewer, 'move', data_x, data_y)
# Now, the transforms themselves.
class GingaTransform(Transform):
"""
The base Ginga transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
viewer = None
#pass_through = True
def invalidate(self):
#print("I don't feel validated! (%s)" % (self.pass_through))
return Transform.invalidate(self)
def transform_non_affine(self, xy):
"""
Override the transform_non_affine method to implement the custom
transform.
The input and output are Nx2 numpy arrays.
"""
#print(("transform in:", xy))
if self.viewer is None:
return xy
res = np.dstack(self.viewer.get_canvas_xy(xy.T[0], xy.T[1]))[0]
#print(("transform out:", res))
return res
# This is where things get interesting. With this projection,
# straight lines in data space become curves in display space.
# This is done by interpolating new values between the input
# values of the data. Since ``transform`` must not return a
# differently-sized array, any transform that requires
# changing the length of the data array must happen within
# ``transform_path``.
def transform_path_non_affine(self, path):
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = \
Transform.transform_path_non_affine.__doc__
if matplotlib.__version__ < '1.2':
# Note: For compatibility with matplotlib v1.1 and older, you'll
# need to explicitly implement a ``transform`` method as well.
# Otherwise a ``NotImplementedError`` will be raised. This isn't
# necessary for v1.2 and newer, however.
transform = transform_non_affine
# Similarly, we need to explicitly override ``transform_path`` if
# compatibility with older matplotlib versions is needed. With v1.2
# and newer, only overriding the ``transform_path_non_affine``
# method is sufficient.
transform_path = transform_path_non_affine
transform_path.__doc__ = Transform.transform_path.__doc__
def inverted(self):
tform = GingaAxes.InvertedGingaTransform()
tform.viewer = self.viewer
return tform
inverted.__doc__ = Transform.inverted.__doc__
class InvertedGingaTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
viewer = None
def transform_non_affine(self, xy):
#print "transform in:", xy
if self.viewer is None:
return xy
res = np.dstack(self.viewer.get_data_xy(xy.T[0], xy.T[1]))[0]
#print "transform out:", res
return res
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
# As before, we need to implement the "transform" method for
# compatibility with matplotlib v1.1 and older.
if matplotlib.__version__ < '1.2':
transform = transform_non_affine
def inverted(self):
# The inverse of the inverse is the original transform... ;)
tform = GingaAxes.GingaTransform()
tform.viewer = self.viewer
return tform
inverted.__doc__ = Transform.inverted.__doc__
# Now register the projection with matplotlib so the user can select
# it.
register_projection(GingaAxes)
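# Hedged usage note (added for illustration; `viewer` is assumed to be an existing
# ginga viewer object and is not constructed here):
#   fig = plt.figure()
#   ax = fig.add_subplot(1, 1, 1, projection='ginga')
#   ax.set_viewer(viewer)
#   ax.plot(x_data, y_data)   # drawn in ginga data coordinates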
#END
|
bsd-3-clause
|
pjh/vm-analyze
|
analyze/simulate_segments_lib.py
|
1
|
60343
|
# Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, [email protected]
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
from trace.vm_regex import *
from util.pjh_utils import *
from analyze.process_group_class import *
from trace.vm_common import *
import itertools
import os
import re
import shutil
from collections import defaultdict
min_segment_size = 1 # must be a power of 2!!!
#############################################################################
def scale_addr(addr):
#ADDR_SCALE_FACTOR = 1 # divide addrs to avoid signed int problems...
ADDR_SCALE_FACTOR = 2 # divide addrs to avoid signed int problems...
return int(addr / ADDR_SCALE_FACTOR)
# segset is a dictionary of segments: keys are segment sizes, values are
# tuples:
# (num-segments, max-num-segments)
# plot_fname will have .png appended to it.
def segset_to_plot(segset, plot_fname, plot_title, pid_pdf):
tag = "segset_to_plot"
plot_fname = "{0}.png".format(plot_fname)
print_debug(tag, "Writing segset plot to file {0}".format(plot_fname))
scale_factor = 2.0
figsize = (8*scale_factor, 6*scale_factor)
# Default figsize is (8,6): leads to an 800x600 .png image
plt.figure(num=1, figsize=figsize)
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figure
bar_kwargs = { # dictionary of plot attributes
'visible' : True,
} #http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
#label_kwargs = {
# 'size' : 'large', # x-large, xx-large
#}
seg_count = 0
max_seg_count = 0
width = 100.0
if segset:
xvals = sorted(segset.keys())
else:
xvals = []
counts = []
max_counts = []
ymax = 0
for x in xvals:
(count, maxcount) = segset[x]
counts.append(count)
max_counts.append(maxcount)
if max(count, maxcount) > ymax:
ymax = max(count, maxcount)
seg_count += count
max_seg_count += maxcount
#plt.title("{0}: segment sizes - up to {1} total segments".format(
# prog_name, max_seg_count))
if 'sudo' in plot_title: # HACK alert:
plot_title = 'Cassandra'
plt.title(plot_title, **title_kwargs)
print_debug(tag, ("xvals: {0}").format(xvals))
print_debug(tag, ("counts: {0}").format(counts))
print_debug(tag, ("max_counts: {0}").format(max_counts))
# Plot columns one at a time, in order to set column width
# appropriately for log scale:
width_factor = 10 # smaller factor = wider columns
for i in range(0, len(xvals)):
tick = xvals[i]
count = counts[i]
max_count = max_counts[i]
bar_width = tick / width_factor # right-er ticks have greater width
plot_both_bars = False # HACK!
if plot_both_bars:
#left = tick - (bar_width/2)
left = tick - bar_width
plt.bar([left], [count], width=bar_width, bottom=0,
color="red", **bar_kwargs)
plt.bar([left+bar_width], [max_count], width=bar_width, bottom=0,
color="orange", **bar_kwargs)
else:
left = tick
plt.bar([left], [count], width=bar_width, bottom=0,
#color="red", **bar_kwargs)
color='green', **bar_kwargs)
xticks = []
xlabels = []
if len(xvals) == 0:
xticks = [0, 1]
xlabels = ["0 B", "1 B"]
else:
pow2 = 1
#labels = itertools.cycle([1, 4, 16, 64, 256])
while True:
if pow2 < 1024:
pow2 *= 4
continue
xticks.append(pow2)
if pow2 < KB_BYTES:
label = 'B'
num = str(pow2)
elif pow2 < MB_BYTES:
label = 'KB'
num = str(pow2 / KB_BYTES)
elif pow2 < GB_BYTES:
label = 'MB'
num = str(pow2 / MB_BYTES)
elif pow2 < TB_BYTES:
label = 'GB'
num = str(pow2 / GB_BYTES)
else:
label = 'TB'
num = str(pow2 / TB_BYTES)
xlabels.append(("{0} {1}").format(num, label))
if pow2 >= xvals[-1]:
break
pow2 *= 4
print_debug(tag, ("xticks: {0}").format(xticks))
print_debug(tag, ("xlabels: {0}").format(xlabels))
ax = plt.axes()
ax.set_xscale("log")
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, rotation='vertical', **smallticklabel_kwargs)
ax.set_xlabel(("Segment size").format(), **axislabel_kwargs)
#ax.set_xlim(xticks[0], xticks[-1])
ax.set_xlim(xticks[0] - xticks[0]/width_factor,
xticks[-1] + xticks[-1]/width_factor)
ax.set_ylabel("Count", **axislabel_kwargs)
ax.set_ylim(0, ymax+10)
ax.tick_params(axis='both', labelsize=plotconf['ticklabelsize'])
# "shrink" the plot up, so that labels don't go off the bottom:
box = ax.get_position()
ax.set_position([box.x0, box.y0 * 1.4, box.width, box.height])
# Save plot:
if pid_pdf:
pid_pdf.savefig()
plt.savefig(plot_fname)
plt.close()
# Don't forget, or next plot will be drawn on top of previous
# one!
return
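# Hedged example (added for illustration; the sizes and counts are made up, and the
# call below is shown only in a comment): segset maps power-of-two segment sizes in
# bytes to (current-count, max-count) tuples, e.g.
#   segset = {4096: (12, 20), 65536: (3, 7), 1048576: (1, 1)}
#   segset_to_plot(segset, "example_segsizes", "example app", None)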
#############################################################################
# A processes_tracker is a container for multiple process_info objects.
class processes_tracker:
"""docstring..."""
tag = "class processes_tracker"
# Members:
proc_dict = None
def __init__(self):
tag = "{0}.__init__".format(self.tag)
self.reset()
return
def reset(self):
tag = "{0}.__init__".format(self.tag)
self.proc_dict = dict()
return
def make_copy(self):
tag = "{0}.copy".format(self.tag)
new_proc_tracker = processes_tracker()
new_proc_tracker.proc_dict = self.proc_dict.copy()
return new_proc_tracker
# Returns the process_info object for the specified pid.
# Returns None if the pid is not found.
def get_process_info(self, pid):
tag = "{0}.get_process_info".format(self.tag)
if pid is None:
print_warning(tag, ("got None pid").format())
return None
try:
proc_info = self.proc_dict[pid]
except KeyError:
return None
if proc_info.get_pid() != pid:
print_error_exit(tag, ("got process_info {0} from proc_dict, "
"but its pid doesn't match lookup pid {1}").format(
proc_info.to_str(), pid))
return proc_info
'''
def set_process_info(self, pid, proc_info):
tag = "{0}.set_process_info".format(self.tag)
self.proc_dict[pid] = proc_info
print_debug(tag, ("set proc_dict[{0}] = {1}").format(
pid, proc_info.to_str()))
return
'''
# Inserts proc_info into the dictionary of process_infos that are
# being tracked. The pid of the process_info object is used as a
# unique key for tracking processes. This method does not check if
# a process_info object is already being tracked or not for the
# specified proc_info.pid, so this method is also used to "update"
# process_infos.
def insert_process_info(self, proc_info):
tag = "{0}.info_process_info".format(self.tag)
self.proc_dict[proc_info.get_pid()] = proc_info
print_debug(tag, ("set proc_dict[{0}] = {1}").format(
proc_info.get_pid(), proc_info.to_str()))
return
# Returns a list of all of the process_info objects that are being
# tracked, sorted by ascending pid.
def get_all_process_infos(self):
return sorted(self.proc_dict.values(),
key=lambda proc_info: proc_info.get_pid())
# Returns a list of all of the proc_infos for the tgids stored in
# the parent's children field. On error, returns None.
def get_child_process_infos(self, parent_tgid):
l = []
try:
parent = self.proc_dict[parent_tgid]
for child_tgid in parent.get_children_tgids():
try:
l.append(self.proc_dict[child_tgid])
except KeyError:
return None
except KeyError:
return None
return l
def get_all_root_process_infos(self):
l = []
for proc_info in self.proc_dict.values():
if proc_info.get_is_rootproc():
l.append(proc_info)
return sorted(l, key=lambda proc_info: proc_info.get_pid())
def num_tracked(self):
return len(self.proc_dict)
def write_process_maps(self, output_f):
for proc_info in self.get_all_process_infos():
proc_info.write_proc_map(output_f)
output_f.write(("\n").format())
return
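# Hedged usage sketch (added for illustration; the pid and program name are made up
# and this helper is never called by the module):
def _example_processes_tracker_usage():
    tracker = processes_tracker()
    proc = process_info(1234)           # hypothetical pid
    proc.set_progname('example-prog')
    tracker.insert_process_info(proc)
    return tracker.get_process_info(1234).name()   # 'example-prog-1234'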
#############################################################################
PROCESS_INFO_UNKNOWN_NAME = 'unknown'
class process_info:
"""docstring for process_info class..."""
tag = "class process_info"
# Members: not all must/will be used
progname = None
speculative_progname = None
pid = None
ptgid = None # parent's tgid
children = None
    is_rootproc = None # is this a "root" process during the trace?
tgid_for_stats = None # tgid to "transfer" stats to
context = None
# "context" dict: things like
# brk: location of program break pointer
# open_fds: list of open file descriptors
# mmap_stats: (at-addr count, total mapped size)
# # todo: mmap_stats should be moved into stats dict...
saw_fork = None
saw_exec = None
exec_follows_fork = None
stats = None
segset = None # todo: make a "SegSet" class
syscall_cmd = None
syscall_args = None
#vma_module_map = None
#vma_fn_map = None
vmatable = None # only the vmas currently mapped into process
all_vmas = None # all vmas ever
cp_vmas = None # all_vmas since previous checkpoint reset
use_bprm = False
bprm_vma = None
vma_hash_fn = None
# The plain "vmatable" keeps track of just the vmas that are currently
# present in the process' virtual memory mapping. all_vmas keeps
# track of *every* vma that was ever present in the process' memory
# map. all_vmas is a map - vma_hash_fn takes a vm_mapping as its
# argument and returns the key it should be hashed into all_vmas with.
# cp_vmas is like all_vmas, but it may be "reset" by checkpoints along
# the way as we analyze the trace.
rq_counts = None # "read-quantum"
wq_counts = None # "write-quantum"
r_counts = None # reads
w_counts = None # writes
zero_quanta = None
traced_by_pin = None
# Use these to keep track of the points in time where this process
# had the greatest number of allocated vmas and the greatest virtual
# memory size:
vma_count = None
max_vma_count = None
max_vma_count_time = None
total_vm_size = None
max_vm_size = None
max_vm_size_time = None
rss_pages = None
# pid is required for construction; everything else may be set later.
def __init__(self, pid):
tag = "{0}.__init__".format(self.tag)
# "tgid_for_stats" default to the pid used to initialize the
# proc_info, but may be changed later.
self.pid = pid
self.tgid_for_stats = pid
self.reset()
def reset(self):
tag = "{0}.reset".format(self.tag)
# Call constructors for dict(), list(), etc. explicitly, to ensure
# that every process_info has its own...
self.progname = PROCESS_INFO_UNKNOWN_NAME
self.speculative_progname = None
self.context = dict()
self.context["brk"] = int(0x0)
self.context["open_fds"] = list()
#self.context["mmap_stats"] = (0, 0)
self.saw_fork = False
self.saw_exec = False
self.exec_follows_fork = False
self.stats = dict()
self.segset = None # set later, by "strategy" code...
self.syscall_cmd = None
self.syscall_args = None
self.vmatable = dict()
self.all_vmas = dict()
self.cp_vmas = dict()
self.use_bprm = False
self.bprm_vma = None
#self.vma_module_map = dict()
#self.vma_fn_map = dict()
self.vma_hash_fn = None
self.rq_counts = list()
self.wq_counts = list()
self.r_counts = list()
self.w_counts = list()
self.zero_quanta = 0
self.traced_by_pin = False
self.children = list()
self.vma_count = 0
self.max_vma_count = 0
self.max_vma_count_time = -1
self.total_vm_size = 0
self.max_vm_size = 0
self.max_vm_size_time = -1
self.rss_pages = defaultdict(int)
# Leave alone: pid, ptgid, is_rootproc, tgid_for_stats
return
# Resets proc_info data that is relevant to the ongoing simulation /
# analysis. Sometimes we want to do a "full" reset that blows away
# all of the previous virtual memory mappings that have been seen
# (i.e. when the kernel emits a reset-sim event after a process exec).
# Other times we only want to do a "checkpoint" reset that resets
# cp_vmas and not much else.
# Note that this function does NOT change the proc_info's context
# dict!
def reset_sim_data(self, cp_or_full):
tag = "{0}.reset_sim_data".format(self.tag)
# What simulation data is tracked in the proc_info struct that
# we'd like to reset?
# stats, segset, vmatable, all_vmas, cp_vmas
# What should stay the same?
# progname, pid, ptgid, is_rootproc, tgid_for_stats, children
# context? Should stay the same here, user may need to adjust though!
# syscall_cmd / args
# vma_hash_fn
# saw_fork and saw_exec
# Things that will only be reset on a full reset:
if cp_or_full == 'full':
print_debug(tag, ("resetting stats dict that contains: "
"{0}").format(stats_to_str(self.stats)))
print_error_exit(tag, ("this is dead code, I think; "
"if you need it, must review other things I've "
"added to process_info objects, like the things "
"adjusted in track_vm_size()").format())
self.stats = dict()
self.segset = None # BUG: needs to be set to dict()?
self.vmatable = dict()
self.all_vmas = dict()
self.use_bprm = False
self.bprm_vma = None
# I think this makes sense, but right now it doesn't really
# matter: read / write events only come after the sim_reset
# event anyway.
self.end_sched_quantum()
self.rq_counts = list()
self.wq_counts = list()
self.r_counts = list()
self.w_counts = list()
self.zero_quanta = 0
elif cp_or_full != 'cp':
print_error_exit(tag, ("invalid cp_or_full: {0}").format(
cp_or_full))
# Things that will always be reset, on either a full reset or
# a checkpoint reset:
self.cp_vmas = dict()
# todo: eventually may want to add a separate stats dict for
# between-checkpoints...
return
def to_str(self):
return ("process_info[progname={0}, pid={1}]").format(
self.progname, self.pid)
def get_progname(self):
return self.progname
def get_pid(self):
return self.pid
def name(self):
return "{}-{}".format(self.progname, self.pid)
def get_ptgid(self):
return self.ptgid
def get_children_tgids(self):
return self.children
def get_context(self):
return self.context
def get_stats(self):
return self.stats
def get_segset(self):
return self.segset
def get_syscall_cmd(self):
return self.syscall_cmd
def get_syscall_args(self):
return self.syscall_args
def get_vmatable(self):
return self.vmatable
# Returns a list of the vmas stored in the specified table. For the
# all_vmas and cp_vmas tables, the lists of vmas stored for EACH
# key will all be appended together. If the sort argument is True,
# then these lists will be sorted by start-address (after they have
# all been appended together).
def get_vmalist(self, whichtable, sort):
tag = "{}:get_vmalist".format(self.tag)
vmas = []
if whichtable == 'vmatable':
vmas = self.vmatable.values()
elif whichtable == 'all_vmas':
for keylist in self.all_vmas.values():
vmas += keylist # list concatenate
elif whichtable == 'cp_vmas':
for keylist in self.cp_vmas.values():
vmas += keylist # list concatenate
else:
print_error_exit(tag, ("invalid whichtable {}").format(
whichtable))
if sort:
return sorted(vmas, key=lambda vma: vma.start_addr)
else:
return vmas
def get_tgid_for_stats(self):
return self.tgid_for_stats
def is_traced_by_pin(self):
return self.traced_by_pin
def is_progname_set(self):
if ((not self.progname) or
(self.progname == PROCESS_INFO_UNKNOWN_NAME) or
(self.progname == '<...>')):
return False
return True
def is_speculative_progname_set(self):
return self.speculative_progname is not None
def is_ptgid_set(self):
return self.ptgid != None
def get_is_rootproc(self):
return self.is_rootproc
def set_progname(self, progname):
tag = "{}.set_progname".format(self.tag)
self.progname = progname
if (self.speculative_progname and
progname != self.speculative_progname):
# It turns out that this can happen in the ridiculous
# case explained in lookahead_fork_exec() where the
# first thing that a forked process does is fork another
# process. Ugh. However, in that case, we *set* the spec
# progname, but never actually use it, because tgid_for_stats
# works to get the correct plotting appname (tgid_for_stats
# only doesn't work and the spec progname is used in other
# weird cases where apache2 and chrome are forked from
# other processes that we end up ignoring). Anyway,
# don't perform strict check here;
# if we actually do end up using the wrong speculative
# progname for plotting, you'll realize it later.
print_unexpected(False, tag, ("proc_info {}: speculative_"
"progname was {}, but now setting progname to "
"{}!").format(self.pid, self.speculative_progname,
progname))
return
def set_speculative_progname(self, progname):
self.speculative_progname = progname
def set_pid(self, pid):
self.pid = pid
def set_ptgid(self, ptgid):
self.ptgid = ptgid
def set_is_rootproc(self, is_rootproc):
self.is_rootproc = is_rootproc
def set_tgid_for_stats(self, tgid_for_stats):
self.tgid_for_stats = tgid_for_stats
def set_segset(self, segset):
self.segset = segset
def set_syscall_cmd(self, syscall_cmd):
self.syscall_cmd = syscall_cmd
def set_syscall_args(self, syscall_args):
self.syscall_args = syscall_args
def add_to_stats(self, key, n):
tag = "{0}.add_to_stats".format(self.tag)
add_to_stats_dict(self.stats, key, n)
def set_vma_hash_fn(self, fn):
self.vma_hash_fn = fn
def set_traced_by_pin(self):
self.traced_by_pin = True
def stats_to_str(self):
return stats_to_str(self.stats)
def context_to_str(self):
return context_to_str(self.context)
def segset_to_str(self):
return segset_to_str(self.segset)
def vmatable_to_str(self):
return vmatable_to_str(self.vmatable)
def segset_count(self):
return segset_count(self.segset)
def vmatable_count(self):
return vmatable_count(self.vmatable)
def set_saw_fork(self, yesno):
self.saw_fork = yesno
return
def set_saw_exec(self, yesno):
self.saw_exec = yesno
return
def set_exec_follows_fork(self, yesno):
self.exec_follows_fork = yesno
return
# If we have seen either the fork ('dup_mmap' events) or the exec
# ('__bprm_mm_init' event) for this proc_info, then we know that
# we should have full information for it - it was started during
# our trace (not before the trace started), and we should know
# about all of its vmas.
# Actually, this isn't quite true - we could happen to start a
# trace just after a process' fork, but in time to see its exec,
# which will then start removing vmas from the address space
# that we don't actually know about. This happened in one trace
# so far...
def have_full_info(self):
return (self.saw_fork or self.saw_exec)
def be_strict(self):
# If we have full information about the process (we've seen either
# its fork or its exec or both), then be strict about our checking
# and assertions.
return self.have_full_info()
# Note that children tracked via this method are not necessarily
# direct children of the process; they could be grandchildren /
# grand-grandchildren / etc., but for measurement purposes we want
# to group them with this top-level "root" process.
def add_child(self, child_tgid):
		tag = "{}.add_child".format(self.tag)
		if not self.is_rootproc:
print_error_exit(tag, ("adding a child {} to a proc_info {} "
"that's not a rootproc - is this right?").format(
child_tgid, self.name()))
self.children.append(child_tgid)
return
# vma is a vm_mapping object. This function will add the vma to both
# all_vmas and cp_vmas!
def add_to_all_vmas(self, vma):
tag = "{0}.add_to_all_vmas".format(self.tag)
if not self.vma_hash_fn:
print_error_exit(tag, ("self.vma_hash_fn is not defined").format())
# Hash the vma and append it to the list of vmas with that hash value
# in the all_vmas map.
key = self.vma_hash_fn(vma)
for d in [self.all_vmas, self.cp_vmas]:
try:
vmalist = d[key]
except KeyError:
vmalist = list()
d[key] = vmalist
# I think this should work: only need to set the
# mapping once, then list itself is mutable (appending
# to it doesn't change its identity). This page says
# that lists are mutable:
# http://docs.python.org/3/reference/datamodel.html
vmalist.append(vma)
return
# internal helper function:
def get_all_vma_list(self):
# Construct a list of all vmas by appending together all of
# the lists that are kept in the all_vmas dict:
print_error_exit('get_all_vma_list', ("I think this method "
"is deprecated - use get_vmalist() instead!").format())
all_vma_list = []
for vma_list in self.all_vmas.values():
all_vma_list += vma_list
return all_vma_list
def get_cp_vma_list(self):
# Construct a list of all vmas by appending together all of
# the lists that are kept in the cp_vmas dict:
print_error_exit('get_cp_vma_list', ("I think this method "
"is deprecated - use get_vmalist() instead!").format())
cp_vma_list = []
for vma_list in self.cp_vmas.values():
cp_vma_list += vma_list
return cp_vma_list
# Iterates over the entire all_vmas structure and calls the query_fn
	# on each vma. The query_fn should return a list of keys (string or
	# numeric) when passed a vm_mapping object. query_all_vmas will return
	# a new mapping from these keys to the *lists of* vmas that "satisfy"
	# each key.
# If the query_fn returns None, then the vma that was passed as an
# argument will not be included anywhere in the returned mapping.
#
# For example, a simple case would be a query_fn that examines vmas
# and returns either "True" or "False" depending on whether or not
# those vmas satisfy some condition; then, the caller can easily
# get the list of vmas for which the condition is satisfied by:
# vmalist = proc_info.query_all_vmas(is_cond_true_fn)["True"]
#
	# A query function that never returns None and always returns some
	# key will serve to "sort" or classify the vmas into lists for every
# possible key. More sophisticated queries that return None for some
# vmas can also be used to exclude vmas that do not meet some condition
# while classifying the vmas by key at the same time.
#
# When writing query functions, be aware of how vmas are
# created and unmapped, and what the stored vma_ops mean. For example,
# note that a vma may be created by an 'alloc' or other vma_op, and
# then unmapped when some other operation occurs and creates a new
# vma with an op like 'resize' or 'access_change' to replace it.
def query_all_vmas(self, query_fn):
tag = "{0}.query_all_vmas".format(self.tag)
# SHORTCUT PATH: if query_fn == vma_hash_fn, then don't have to
# iterate over all vmas, can just directly return the mapping that
# we're already keeping track of. Make a copy first though, so
# that the caller can't screw up our all_vmas dict.
# Well, with the change of query_fns to returning lists of keys
# rather than just a single key, it should no longer be possible
# for this to happen...
if query_fn == self.vma_hash_fn:
print_error_exit(tag, ("SHORTCUT PATH successfully hit!").format())
return self.all_vmas.copy()
#all_vma_list = self.get_all_vma_list()
all_vma_list = self.get_vmalist('all_vmas', sort=False)
return construct_dict_from_list(all_vma_list, query_fn)
# See description of query_all_vmas above.
def query_cp_vmas(self, query_fn):
tag = "{0}.query_cp_vmas".format(self.tag)
if query_fn == self.vma_hash_fn:
print_error_exit(tag, ("SHORTCUT PATH successfully hit!").format())
return self.cp_vmas.copy()
cp_vma_list = self.get_cp_vma_list()
return construct_dict_from_list(cp_vma_list, query_fn)
# See description of query_all_vmas above.
def query_vmatable(self, query_fn):
tag = "{0}.query_vmatable".format(self.tag)
# Unlike all_vmas, the values in the vmatable are individual vmas,
# not lists of vmas. So, we don't have much work to do here:
return construct_dict_from_dict(self.vmatable, query_fn)
def query_vmas(self, query_fn, whichtable):
tag = "{0}.query_vmas".format(self.tag)
if whichtable == 'vmatable':
return self.query_vmatable(query_fn)
elif whichtable == 'all_vmas':
return self.query_all_vmas(query_fn)
elif whichtable == 'cp_vmas':
return self.query_cp_vmas(query_fn)
else:
print_error_exit(tag, ("invalid whichtable: "
"{0}").format(whichtable))
return (None, None)
# Iterates over all of the vmas currently in the vmatable
# What if a vma was removed from the vmatable and put into
# all_vmas during the previous quantum? Ugh - better iterate
# over all_vmas, I guess.
def end_sched_quantum(self):
tag = "end_sched_quantum"
# Count the number of vmas that were written to and
# read from in the previous quantum, as well as the total
# number of read and write accesses.
reads = 0
writes = 0
vmas_r = 0
vmas_w = 0
#for vma in self.get_all_vma_list():
for vma in self.get_vmalist('all_vmas', sort=False):
(rq, wq, r, w) = vma.reset_access()
reads += rq
writes += wq
if rq > 0:
vmas_r += 1
if wq > 0:
vmas_w += 1
# Store the vma read and write counts for each quantum in a list,
# so that we can calculate statistics and distributions and such
# later. We want to do this even if the counts were 0 for this
# quantum!
		# Note that numpy arrays have a fixed size (so repeatedly appending
		# to them is inefficient); we use plain lists for storing the
		# counts, and may convert them to numpy arrays later.
#
# skip_zeros: when examining the vmas_r and vmas_w counts for
# hello-world and firefox, there are some quanta where the
# process is apparently scheduled in, but doesn't perform a
# single read or write to a vma that we track. This seems
# absolutely ridiculous - it must be a quirk of the sched_switch
# events coming from the kernel (or unlikely but possibly there
# are reads and writes that go to vmas that we are not tracking;
# there are a handful of these in every trace, but they seem
# fairly rare). So, if both the read and write vma counts are
# zero, then we probably shouldn't bother appending them to
# the lists that we're tracking, otherwise the statistics will
# be skewed.
skip_zeros = True
if (vmas_r > 0 or vmas_w > 0) or (not skip_zeros):
self.r_counts.append(reads)
self.w_counts.append(writes)
self.rq_counts.append(vmas_r)
self.wq_counts.append(vmas_w)
print_debug(tag, ("{0}-{1}: vmas read / written in this "
"quantum: {2} / {3}").format(
self.progname, self.pid, vmas_r, vmas_w))
#print_debug(tag, ("quantum count arrays: {0} & {1}").format(
# self.rq_counts, self.wq_counts))
else:
#if reads > 0 or writes > 0:
# print_error_exit(tag, ("").format())
self.zero_quanta += 1
return
def sched_stats_string(self):
tag = "{0}.sched_stats_string".format(self.tag)
if len(self.rq_counts) != len(self.wq_counts):
print_error_exit(tag, ("inconsistent lengths: "
"rq_counts {0}, wq_counts {1}").format(
len(self.rq_counts), len(self.wq_counts)))
quanta = len(self.rq_counts)
#total_quanta = quanta + self.zero_quanta
# Use numpy arrays.
# http://wiki.scipy.org/Tentative_NumPy_Tutorial#head-
# 053463ac1c1df8d47f8723f470b62c4bd0d11f07
r_array = np.array(self.r_counts)
w_array = np.array(self.w_counts)
rq_array = np.array(self.rq_counts)
wq_array = np.array(self.wq_counts)
#totals = rq_array + wq_array
s = ("{0} scheduling quanta").format(quanta)
if self.zero_quanta > 0:
s += (" (ignored {0} quanta with no reads/writes)").format(
self.zero_quanta)
if quanta > 0:
s += ("\nmean memory reads per quantum: {0:.2f} (std dev "
"{1:.2f})\nmean memory writes per quantum: {2:.2f} "
"(std dev {3:.2f})").format(
r_array.mean(), r_array.std(),
w_array.mean(), w_array.std())
s += ("\nmean \"segments\" read per quantum: {0:.2f} (std dev "
"{1:.2f})\nmean \"segments\" written per quantum: {2:.2f} "
"(std dev {3:.2f})").format(
rq_array.mean(), rq_array.std(),
wq_array.mean(), wq_array.std())
s += ("\nrq_array: {0}\nwq_array: {1}").format(
self.rq_counts, self.wq_counts)
return s
	# Writes the process' current memory map (vmatable) to the given
# output file. The vmas will be sorted by ascending address, to
# match the /proc/pid/maps file.
def write_proc_map(self, output_f):
tag = "{}.write_proc_map".format(self.tag)
output_f.write(("{0}-{1}\n").format(self.progname, self.pid))
#output_f.write(("{0}\n").format(self.sched_stats_string()))
		if self.bprm_vma is not None:
print_unexpected(False, tag, ("write_proc_map called while "
"self.bprm_vma non-null: {}").format(self.bprm_vma.to_str()))
# Sort by mapping's virtual address (i.e. matching maps file output):
sorted_vmas = sorted(self.vmatable.values(),
key=lambda vma: vma.start_addr)
if len(sorted_vmas) > 0:
for vma in sorted_vmas:
#output_f.write(("{0}\n").format(vma.to_str_maps_format()))
output_f.write(("{0}\n").format(vma.to_str_maps_plus()))
else:
output_f.write(("no vmas in vmatable\n").format())
return
# Creates a plot of the virtual address space for the current vmatable.
# plot_fname will have .png added to it. If pdflist is non-empty, the
# plots will be appended to the PDF files in the pdflist.
def plot_vaspace(self, plot_fname, pdflist):
tag = "plot_vaspace"
# The process_info's vmatable contains individual vmas, keyed
# by their start address. This is what we want for plotting, but
# we want to sort the vmas by their permission first. The dict
# that we construct here will have keys from PERMS_KEYS and values
# that are *lists of vmas*.
def query_fn(vma):
return [vma.perms_key]
table = 'vmatable' # support all_vmas or cp_vmas later?
		(vmas_by_perm, vmacount) = self.query_vmas(query_fn, table)
if vmacount == 0:
print_warning(tag, ("vmacount in process {}-{}'s vmatable "
"is 0; will not generate a VAspace plot, returning "
"now").format(self.progname, self.pid))
return
proc_min_addr = self.get_min_addr(table)
proc_max_addr = self.get_max_addr(table)
plot_scale_factor = 2.0
figsize = (8*plot_scale_factor, 6*plot_scale_factor)
bar_kwargs = { # dictionary of plot attributes
'visible' : True,
} #http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
max_perm_value = len(PERMS_KEYS) + 1 # plus one: for "segment" value
unscaled_min = proc_min_addr
unscaled_max = proc_max_addr
scaled_min = scale_addr(proc_min_addr)
scaled_max = scale_addr(proc_max_addr)
#print_debug(tag, ("all plots: unscaled range [{0}, {1}], scaled "
# "range [{2}, {3}]").format(hex(unscaled_min), hex(unscaled_max),
# hex(scaled_min), hex(scaled_max)))
# Loop and create multiple plots. It is impossible to plot the
# process' entire virtual address space on one chart, because it
# is way too wide.
# Current strategy: plot everything, but only create plots that
# are up to some number of GB wide.
plot_count = 0
max_plot_width = GB_BYTES * 1
left_addr = unscaled_min
while True:
plt.figure(1, figsize=figsize) # needed when making multiple plots
# Goal: in this plot, only include regions that start beyond
# left_addr and end before right_addr.
right_addr = left_addr + max_plot_width - 1
if right_addr > MAX_ADDR64:
right_addr = MAX_ADDR64
min_addr_this_plot = MAX_ADDR64
max_addr_this_plot = 0x0
start_next_plot = MAX_ADDR64
#print_debug(tag, ("starting plotting loop for addr range up "
# "to [{0}, {1}] (width {2} GB); min_addr_this_plot = {3}, "
# "max_addr_this_plot = {4}").format(
# hex(left_addr), hex(right_addr),
# (right_addr - left_addr + 1) / GB_BYTES,
# hex(min_addr_this_plot), hex(max_addr_this_plot)))
y_value = 0
y_labels = [""] + PERMS_KEYS
colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y'])#, 'k'])
# Re-start color cycle for every plot.
# http://matplotlib.org/examples/pylab_examples/filledmark
# er_demo.html
# http://matplotlib.org/api/colors_api.html
for perms_key in PERMS_KEYS:
# Make sure to do these steps even in when we skip a
# permission type:
color = next(colors)
y_value += 1 # start at height 1!
try:
vmalist = sorted(vmas_by_perm[perms_key],
key=lambda vma: vma.start_addr)
except KeyError:
print_debug(tag, ("{}-{}: no vmas in list for perms key "
"{} - continuing to next key").format(
self.progname, self.pid, perms_key))
continue
#if DEBUG:
# print_warning(tag, ("verifying that address list is "
# "sorted - disable this code eventually!").format())
# sl_verify_is_sorted(addr_list)
#print_debug(tag, ("entire addr_list for key {0}: "
# "{1}").format(perms_key, list(map(lambda x: hex(x), addr_list))))
# Pre-process addr_list: in perms_plotter_process_smaps_entry(),
# addr_list is constructed by adding one address for every
# PAGE_SIZE_BYTES in the mapping. So, while the next address in
# addr_list is PAGE_SIZE_BYTES beyond the previous address,
# count it as one contiguous mapping.
# This is kind of reversing the work that was done in
# perms_plotter_process_smaps_entry(), but it will also
# coalesce
# mappings that were separate but contiguous in the original
# smaps.
# It looks like the x-axis for the scatter plot is represented
# internally as a signed 64-bit int; when given an address
# greater than 2^63 - 1, plt.savefig() barfs up a
# "ValueError: math domain error" exception. When I set
# a maximum value of 2^63 - 1 in the
# address list, this error went away. So, for now, just truncate
# anything greater than this value?
# Maybe makes more sense to divide entire x-axis by some
# amount in order to fit? Dividing by 2 might not be
# enough (the maximum address value would then still be
# 2^63, which is juuuust greater than 2^63 - 1), so divide
# by 4?
				# Build the per-page addr_list for this perms_key from the
				# sorted vmalist (the legacy contiguity logic below expects
				# one address per PAGE_SIZE_BYTES of each mapping):
				addr_list = []
				for vma in vmalist:
					addr = vma.start_addr
					while addr < vma.start_addr + vma.length:
						addr_list.append(addr)
						addr += PAGE_SIZE_BYTES
				contig_addr_list = list()
				contig_begin = None
				prev_addr = None
sliced_addr_list = []
if len(addr_list) > 0 and addr_list[-1] >= left_addr:
# If last address in addr_list is less than left_addr,
# just skip this - avoids ValueError thrown from
# sl_index_ge().
start_idx = sl_index_ge(addr_list, left_addr)
sliced_addr_list = addr_list[start_idx:]
#print_debug(tag, ("key {0}: starting from idx {1} in "
# "addr_list: {2}").format(perms_key, start_idx,
# hex(sliced_addr_list[0])))
#print_debug(tag, ("entire sliced_addr_list: {0}").format(
# list(map(lambda x:hex(x), sliced_addr_list))))
for addr in sliced_addr_list:
# Splitting up plots:
if addr + PAGE_SIZE_BYTES - 1 > right_addr:
if addr < start_next_plot:
start_next_plot = addr
#print_debug(tag, ("addr {0} + page size > "
# "right_addr {1}, so set start_next_plot to "
# "{2}").format(hex(addr), hex(right_addr),
# hex(start_next_plot)))
break
if addr < min_addr_this_plot:
min_addr_this_plot = addr
#print_debug(tag, ("set min_addr_this_plot = "
# "{0}").format(hex(min_addr_this_plot)))
if addr + PAGE_SIZE_BYTES - 1 > max_addr_this_plot:
max_addr_this_plot = addr + PAGE_SIZE_BYTES - 1
#print_debug(tag, ("set max_addr_this_plot = "
# "{0} from addr {1}").format(
# hex(max_addr_this_plot), hex(addr)))
# Track contiguous regions:
if not prev_addr:
# On first loop, we want to hit continue below
prev_addr = addr - PAGE_SIZE_BYTES
if not contig_begin:
contig_begin = addr
if addr == prev_addr + PAGE_SIZE_BYTES:
#continue contig region
prev_addr = addr
continue
else: # start of new contig region
contig_addr_list.append(
[scale_addr(contig_begin),
scale_addr(prev_addr + PAGE_SIZE_BYTES)])
# Add PAGE_SIZE_BYTES so that rectangle will
# span the actual width of the mapping.
contig_begin = addr
prev_addr = addr
if (len(sliced_addr_list) != 0 and
contig_begin and prev_addr and
contig_begin <= prev_addr): # last one:
contig_addr_list.append(
[scale_addr(contig_begin),
scale_addr(prev_addr + PAGE_SIZE_BYTES)])
# bar_kwargs['gid'] = "{0} {1}".format(y_value, perms_key)
# bar_kwargs['label'] = "{0} {1}".format(y_value, perms_key)
#
# # contig_addr_list is already scaled:
# for [l, r] in contig_addr_list:
# plt.barh(bottom=y_value, width=(r - l), height=0.5,
# left=l, color=color, linewidth=None,
# align='center', **bar_kwargs)
# BOOKMARK: New code starts here: skip all of the addr_list crap above
				# and just directly plot each vma in vmalist that is within the
# left-addr, right-addr range!
bar_kwargs['gid'] = "{0} {1}".format(y_value, perms_key)
bar_kwargs['label'] = "{0} {1}".format(y_value, perms_key)
# contig_addr_list is already scaled:
for [l, r] in contig_addr_list:
plt.barh(bottom=y_value, width=(r - l), height=0.5,
left=l, color=color, linewidth=None,
align='center', **bar_kwargs)
# sanity checks for plot splitting:
if (min_addr_this_plot == MAX_ADDR64 or
max_addr_this_plot == 0x0):
print_error_exit(tag, ("invalid min_addr_this_plot {0} or "
"max_addr_this_plot {1}").format(hex(min_addr_this_plot),
hex(max_addr_this_plot)))
if (min_addr_this_plot < left_addr or
max_addr_this_plot > right_addr):
print_error_exit(tag, ("left-right range is [{0}, {1}], "
"but addr_this_plot range is [{2}, {3}]").format(
hex(left_addr), hex(right_addr),
hex(min_addr_this_plot), hex(max_addr_this_plot)))
plt.title("{}: permissions of mapped virtual pages ({} "
"mappings)".format(self.progname, vmacount))
scaled_min_this_plot = scale_addr(min_addr_this_plot)
scaled_max_this_plot = scale_addr(max_addr_this_plot)
# http://matplotlib.org/api/axis_api.html:
# Bullshit: when width of plot [min_addr_this_plot,
# max_addr_this_plot] is just 1 page (4 KB), then pyplot
# apparently refuses to set the x-axis width correctly - the
# two ticks/labels overlap each other in the middle of the plot.
# I tried for an hour to fix this, but it's being ridiculous.
ax = plt.axes()
#ax.autoscale(False)
#ax.autoscale(enable=True, axis='x', tight=True)
#ax.autoscale(enable=False, axis='x', tight=True)
label_kwargs = {
'size' : 'x-large', # x-large, xx-large also
}
xtick_ticks = [scaled_min_this_plot, scaled_max_this_plot]
xtick_labels = [str(hex(min_addr_this_plot)),
str(hex(max_addr_this_plot))] # labels are unscaled!
width = max_addr_this_plot - min_addr_this_plot + 1 # unscaled!
#print_debug(tag, ("this loop: determined plot address range "
# "[{0}, {1}] (width {2} GB)").format(hex(min_addr_this_plot),
# hex(max_addr_this_plot), width/GB_BYTES))
if width > max_plot_width:
print_error_exit(tag, ("got width={0} bytes, but "
"max_plot_width is {1} bytes!").format(width,
max_plot_width))
ax.set_xbound(scaled_min_this_plot, scaled_max_this_plot)
ax.set_xlim(scaled_min_this_plot, scaled_max_this_plot)
ax.set_xticks(xtick_ticks)
ax.set_xticklabels(xtick_labels)
ax.set_xlabel(("Address space - width {0} ({1} GB)").format(
hex(width), width/GB_BYTES), **label_kwargs)
ax.set_ybound(0, max_perm_value)
ax.set_ylim(0, max_perm_value)
ax.set_ylabel("Page permissions", **label_kwargs)
#print_debug(tag, ("numpy range: [{0}]. normal range: "
# "[{1}]").format(list(np.arange(max_perm_value)),
# list(range(max_perm_value))))
ax.set_yticks(range(max_perm_value))
ax.set_yticklabels(y_labels)
ax.tick_params(axis='both', labelsize='x-large')
# Ugh
#plt.tight_layout()
#ax.autoscale(enable=True, axis='x', tight=True)
#ax.autoscale(enable=False, axis='x', tight=True)
# Save plot:
full_plot_fname = ("{0}-{1}-{2}.{3}").format(
plot_fname,
str(plot_count).zfill(2),
#hex(min_addr_this_plot),
"0x" + (hex(min_addr_this_plot)[2:]).zfill(16),
"png")
#print_debug(tag, ("saving this plot at {0}").format(
# full_plot_fname))
plt.savefig(full_plot_fname)
for pdf in pdflist:
pdf.savefig()
plt.close()
# Don't forget, or next plot will be drawn on top of previous
# one!
# Set up for next plot:
plot_count += 1
left_addr = start_next_plot
if left_addr == MAX_ADDR64:
#print_debug(tag, ("breaking out of plotting loop").format())
break
#print_debug(tag, ("looping again for next plot: "
# "left_addr={0}").format(hex(left_addr)))
return
# Updates the rss (resident in physical memory) pages for this process.
# pagetype must be one of RSS_TYPES (see vm_common.py). pagecount is
# the *current* number of pages of this type (not the +/- change in
# pagecount). Due to the way that the kernel tracks the rss page count,
# pagecount can possibly be negative.
# Returns: True on success, False on error.
def set_rss_pages(self, pagetype, pagecount):
tag = "{}.update_rss".format(self.tag)
if pagetype not in RSS_TYPES:
print_error(tag, ("invalid pagetype={}, not in RSS_TYPES="
"{}").format(pagetype, RSS_TYPES))
return False
self.rss_pages[pagetype] = pagecount
return True
# Returns a reference to the dict that maps RSS_TYPES to page counts.
# Note that some RSS_TYPES may not have been entered into the dict yet.
def get_rss_pages(self):
return self.rss_pages
#############################################################################
# Not part of process_info class:
# This method should be called every time a vma is added to or
# removed from the vmatable, OR when a vma is *resized* (see
# detailed comments in analyze_trace.py:map_unmap_vma()). This
# method not only tracks the total size of allocated virtual
# memory, but also the count of vmas, the maximum vma count and
# maximum vm size, and the timestamps when those maximums occurred.
# This tracking is ONLY done in the leader of the process group -
# if proc_info is not a root/leader process, then the size and count
# will be modified on the proc_info for proc_info.tgid_for_stats instead!
#
# I verified that the tracking done here (max vm size and timestamp)
# matches the tracking done by the vmacount plots and vm_size plot.
#
# Returns: nothing.
def track_vm_size(proc_info, proc_tracker, add_or_sub, size, timestamp):
tag = "track_vm_size"
if proc_info.is_rootproc:
p = proc_info
if proc_info.pid != proc_info.tgid_for_stats:
# I suppose this will fail if/when group_multiproc is
# False in the analysis script, but I rarely/never disable
# that...
print_error(tag, ("assert failed: is_rootproc True, but "
"pid {} != tgid_for_stats {}").format(proc_info.pid,
proc_info.tgid_for_stats))
else:
p = proc_tracker.get_process_info(proc_info.tgid_for_stats)
if not p:
print_unexpected(True, tag, ("get_process_info({}) "
"failed").format(proc_info.tgid_for_stats))
return
if proc_info.pid == proc_info.tgid_for_stats:
# I suppose this will fail if/when group_multiproc is
# False in the analysis script, but I rarely/never disable
# that...
print_error(tag, ("assert failed: is_rootproc False, but "
"pid {} == tgid_for_stats {}").format(proc_info.pid,
proc_info.tgid_for_stats))
	if add_or_sub == 'add':
p.vma_count += 1
p.total_vm_size += size
if p.vma_count > p.max_vma_count:
p.max_vma_count = p.vma_count
p.max_vma_count_time = timestamp
print_debug(tag, ("vmacount_datafn: new max_vma_count "
"{} (time {})").format(p.vma_count, timestamp))
if p.total_vm_size > p.max_vm_size:
p.max_vm_size = p.total_vm_size
p.max_vm_size_time = timestamp
print_debug(tag, ("vmacount_datafn: new max_vm_size "
"{} (time {})").format(p.total_vm_size, timestamp))
	elif add_or_sub == 'sub':
p.vma_count -= 1
p.total_vm_size -= size
if p.have_full_info():
if p.vma_count < 0:
print_unexpected(True, tag, ("{}: vma_count fell "
"below 0 to {}!").format(p.name(),
p.vma_count))
if p.total_vm_size < 0:
print_unexpected(True, tag, ("{}: total_vm_size "
"fell below 0 to {}!").format(p.name(),
p.total_vm_size))
else:
print_error(tag, ("invalid arg {}").format(add_or_sub))
# After adding code to ignore vmas for shared libs and guard
# regions, I verified that the tracking done here matches the
# tracking done in the counts and sizes datafns. I also manually
# validated that the counts when vmas are ignored match the
	# maps files at the max-vma-count timestamp.
debug_count(tag, ("{}").format(p.vma_count))
debug_vmsize(tag, ("{} ({})").format(p.total_vm_size,
pretty_bytes(p.total_vm_size)))
return
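# Illustrative sketch (an assumption, not code from the original analysis
# scripts): how a map/unmap handler might drive track_vm_size(). The vma
# argument is assumed to be a vm_mapping with a .length attribute; a resize
# would presumably be handled as a 'sub' of the old length followed by an
# 'add' of the new length (see analyze_trace.py:map_unmap_vma()).
def example_track_map_then_unmap(proc_info, proc_tracker, vma, timestamp):
	# Account for the new mapping...
	track_vm_size(proc_info, proc_tracker, 'add', vma.length, timestamp)
	# ...and later for its removal:
	track_vm_size(proc_info, proc_tracker, 'sub', vma.length, timestamp)
	return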
# Examines the all_vmas tables of all of the proc_infos in the proc_group
# list and returns a list of all vmas that were active at the specified
# timestamp.
# Returns: a list of vmas, or None on error.
def get_active_vmas(proc_group, timestamp, call_ignore_vmas=False):
tag = 'get_active_vmas'
# Unfortunately, all of the query_fn infrastructure that I already
# have set up won't quite work here, because the query_fns don't
# take any arguments, and I need to pass the timestamp as a
# variable...
# Actually, wait a minute: can I use a "closure" to get around
# this limitation?
# http://stackoverflow.com/a/2009645/1230197
# A decent "how-to" for closures is really hard to find...
# This actually works! Validated on max vma count and max VM size
# for dedup and Chrome... amazing.
def point_in_time_queryfn(vma):
tag = 'point_in_time_queryfn'
nonlocal timestamp
nonlocal call_ignore_vmas
# A vma is active at a particular time if the initial timestamp
# when it was mapped is <= the time AND the time when it was
# unmapped is > the time. Note that just checking vma.is_unmapped
# won't work, because when we're looking back in time when this
# method is called, most/all of the vmas will have been unmapped
# at some point already!
if vma.timestamp <= timestamp:
if ((not vma.is_unmapped or vma.unmap_timestamp > timestamp)
and not (call_ignore_vmas and ignore_vma(vma))):
#print_debug(tag, ("active: {}").format(vma))
return ['active']
#print_debug(tag, ("inactive: {}").format(vma))
return None
(vmadict, numvmas) = query_vmas_grouped(proc_group,
point_in_time_queryfn, 'all_vmas')
#print_debug(tag, ("vmadict keys={}; numvmas={}").format(
# vmadict.keys(), numvmas))
if len(vmadict) > 1:
print_unexpected(True, tag, ("vmadict has more than one key-value "
"pair: {}").format(vmadict.keys()))
try:
vmalist = vmadict['active']
if len(vmalist) != numvmas:
print_unexpected(True, tag, ("assert failed: len(vmalist) "
"= {}, but numvmas={}").format(len(vmalist), numvmas))
except KeyError:
if len(vmadict) != 0:
print_unexpected(True, tag, ("vmadict has exactly "
"one key-value pair, but key is not 'active', it's "
"{}").format(vmadict.keys()))
print_debug(tag, ("no active vmas apparently, returning "
"empty vmalist").format())
vmalist = []
return vmalist
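# Illustrative usage sketch (an assumption, not from the original scripts):
# combining get_active_vmas() with the max_vm_size_time recorded by
# track_vm_size() recovers the address space at its peak size.
def example_active_vmas_at_peak(proc_group):
	leader = proc_group[0]
	# max_vm_size_time is set by track_vm_size() whenever a new maximum
	# total_vm_size is reached for the group leader.
	return get_active_vmas(proc_group, leader.max_vm_size_time,
		call_ignore_vmas=False)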
# Takes a list of active_vmas and removes vmas that are "identical",
# having the same:
# start_addr
# length
# perms_key
#
# This results in a list of active vmas that only includes those that
# are "fundamental" to the application's execution (e.g. they would not
# disappear if the app were rewritten as multi-threaded instead of
# multi-process). The vmas that are eliminated are those for which
# copy-on-write will NEVER be performed!
#
# Returns a new list containing just distinct vmas.
def deduplicate_active_vmas(active_vmas):
tag = 'deduplicate_active_vmas'
def vmas_are_equal(one, other):
# Be sure to check file / filename: I found an instance (from
# chrome) where vmas matched on start_addr, length, and perms_key,
# but had different filenames:
# /var/cache/fontconfig/845c20fd2c4814bcec78e05d37a63ccc-le64.cache-3
# /var/cache/fontconfig/9eae20f1ff8cc0a7d125749e875856bd-le64.cache-3
# Also, don't try to check is_unmapped, since it represents whether
# or not the vmas was unmapped at some point in the future, and may
# not have any bearing on this moment when active_vmas is being
# processed.
if (one.start_addr == other.start_addr and
one.length == other.length and
one.perms_key == other.perms_key and
one.filename == other.filename and
one.offset == other.offset):
#if False: # debugging / sanity checking...
# # We expect timestamp to differ; vma_op and unmap_op
# # will likely differ as well, right? Yes. is_unmapped
# # may be True or False, but shouldn't differ.
# if (#one.filename != other.filename or
# #one.timestamp != other.timestamp or
# #one.vma_op != other.vma_op or
# #one.unmap_op != other.unmap_op
# #one.is_unmapped != other.is_unmapped
# ):
# print_unexpected(True, 'vmas_are_equal',
# ("two vmas match on start_addr, length, and "
# "perms_key, but not on other fields: "
# "[{} {}] [{} {}]").format(
# one.to_str_maps_format(), one,
# other.to_str_maps_format(), other))
return True
return False
dedup_vmas = list()
sorted_vmas = list(sorted(active_vmas, key=lambda vma: vma.start_addr))
prev = None
i = 0
while i < len(sorted_vmas):
# To be very safe: first make a list of all of the vmas with the
# same start_addr
startlist = [sorted_vmas[i]]
i += 1
while (i < len(sorted_vmas) and
sorted_vmas[i].start_addr == startlist[0].start_addr):
startlist.append(sorted_vmas[i])
i += 1
#print_debug(tag, ("initial startlist: {}").format(startlist))
#orig_len = len(startlist)
# Now, iterate over that list, and disregard any vmas that are
# equivalent according to is_equal(). For the utmost safety /
# completeness, we do an n^2 all-pairs check here:
j = 0
while j < len(startlist):
left = startlist[j]
k = j + 1
while k < len(startlist):
right = startlist[k]
# IMPORTANT: only disregard vmas that are non-writeable!
# If they are writeable, then copy-on-write *could* be
# performed (perhaps not likely, but...), so these vmas
# should be kept + counted.
if ((not right.is_writeable()) and
vmas_are_equal(left, right)):
startlist.pop(k)
else:
k += 1
j += 1
#if len(startlist) != orig_len:
# print_debug(tag, ("now startlist: {}").format(startlist))
dedup_vmas += startlist # list concatenate
return dedup_vmas
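# Minimal sketch of the deduplication behavior above (illustrative only;
# _StubVma is a hypothetical stand-in exposing just the fields that
# deduplicate_active_vmas() inspects, and the perms_key strings are
# placeholders):
class _StubVma:
	def __init__(self, start_addr, length, perms_key, filename, offset,
			writeable):
		self.start_addr = start_addr
		self.length = length
		self.perms_key = perms_key
		self.filename = filename
		self.offset = offset
		self.writeable = writeable
	def is_writeable(self):
		return self.writeable
# Two identical read-only mappings (e.g. the same shared lib mapped into two
# processes of the group) collapse to one, while writeable duplicates are
# kept because copy-on-write could still diverge them:
#   ro = _StubVma(0x400000, 4096, 'r-x', '/bin/app', 0, False)
#   rw = _StubVma(0x600000, 4096, 'rw-', '/bin/app', 0, True)
#   deduplicate_active_vmas([ro, ro, rw, rw])   # -> [ro, rw, rw]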
# Returns a tuple: (a dict with the vmas inserted into lists by key
# returned from the query_fn; the total count of vmas in the vmalist
# (across all processes in the group) that were hashed into at least
# one slot in the dict).
def query_vmas_grouped(proc_group, query_fn, whichtable):
tag = 'query_vmas_grouped'
# Put the vmatables / all_vmas / cp_vmas from each process in the
# group into a single list. This loses the key information (the
# grouping by start-addr), but this is fine because the query_fn
# doesn't consider this information anyway (it just processes a
# single vma (vm_mapping object)). Because of this, we don't need
# to keep the vma list sorted either.
vmalist = get_group_vmalist(proc_group, whichtable)
print_debug(tag, ("constructed vmalist with {} vmas grouped from {} "
"processes (root: {})").format(len(vmalist), len(proc_group),
proc_group[0].name()))
print_debug(tag, ("now passing the vmalist and query_fn to "
"construct_dict_from_list, which will run the query_fn "
"on every vma in the list and return a tuple: (a dict with "
"the vmas inserted into lists by key returned from the "
"query_fn; the total count of vmas in the vmalist arg "
"that were hashed into at least one slot in the dict)").format())
return construct_dict_from_list(vmalist, query_fn)
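# Illustrative sketch of the query_fn contract (see the comments above
# query_all_vmas()): a query_fn takes a single vm_mapping and returns either
# a list of hashable keys or None to exclude that vma. For example, to group
# just the writeable vmas of a process group by their permission key:
def example_writeable_by_perms_queryfn(vma):
	if vma.is_writeable():
		return [vma.perms_key]
	return None
# Hypothetical usage:
#   (by_perms, nvmas) = query_vmas_grouped(proc_group,
#       example_writeable_by_perms_queryfn, 'all_vmas')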
#############################################################################
PRINT_SEGSET_CHANGES = True
class segment_set:
"""docstring for segment_set class..."""
tag = "class segment_set"
# Members:
seg_dict = None
vmasize_to_segsize = None
# vmasize_to_segsize is a function with prototype:
# vmasize_to_segsize(vmasize)
# Returns: the segment size that should be used for the specified vma
# size.
#
def __init__(self, vmasize_to_segsize):
tag = "{0}.__init__".format(self.tag)
if not vmasize_to_segsize:
print_error_exit(tag, ("some arg is None: "
"vmasize_to_segsize={0}").format(
vmasize_to_segsize))
self.vmasize_to_segsize = vmasize_to_segsize
self.reset()
def reset(self):
tag = "{0}.reset".format(self.tag)
self.seg_dict = dict()
return
#def to_str(self):
# print_error_exit(self.tag, ("to_str() not implemented yet").format())
# Add a segment corresponding to the specified vma size to the set
# of segments that are being tracked *right now*.
# Returns: a tuple (segsize, now, now_max)
def add_to_now(self, vmasize):
tag = "add_to_now"
global PRINT_SEGSET_CHANGES
segsize = self.vmasize_to_segsize(vmasize)
try:
(now, now_max, ever) = self.seg_dict[segsize]
now += 1
if now > now_max:
now_max = now
except KeyError:
(now, now_max, ever) = (1, 1, 0)
self.seg_dict[segsize] = (now, now_max, ever)
if PRINT_SEGSET_CHANGES:
print_debug(tag, ("seg_dict[{0}] = {1}").format(
segsize, self.seg_dict[segsize]))
return (segsize, now, now_max)
# Add a segment corresponding to the specified vma size to the set
# of segments that are being counted forever.
# Returns: a tuple (segsize, ever)
def add_to_ever(self, vmasize):
tag = "add_to_ever"
global PRINT_SEGSET_CHANGES
segsize = self.vmasize_to_segsize(vmasize)
try:
(now, now_max, ever) = self.seg_dict[segsize]
ever += 1
except KeyError:
(now, now_max, ever) = (0, 0, 1)
self.seg_dict[segsize] = (now, now_max, ever)
if PRINT_SEGSET_CHANGES:
print_debug(tag, ("seg_dict[{0}] = {1}").format(
segsize, self.seg_dict[segsize]))
return (segsize, ever)
# Removes a segment corresponding to the specified vma size from
# the set of segments that are being tracked *right now*. An error
# will be raised if the number of segments tracked goes below zero.
# Returns: a tuple (segsize, now, now_max)
def remove_from_now(self, vmasize):
tag = "remove_from_now"
global PRINT_SEGSET_CHANGES
segsize = self.vmasize_to_segsize(vmasize)
try:
(now, now_max, ever) = self.seg_dict[segsize]
now = now - 1
if now < 0:
print_error_exit(tag, ("number of segments tracked "
"for segsize {0} is below zero! (vmasize {1}, "
"now_max {2}, ever {3}").format(
segsize, vmasize, now_max, ever))
except KeyError:
print_error_exit(tag, ("called for a segsize {0} that "
"has never been seen before! (vmasize {1})").format(
segsize, vmasize))
self.seg_dict[segsize] = (now, now_max, ever)
if PRINT_SEGSET_CHANGES:
print_debug(tag, ("seg_dict[{0}] = {1}").format(
segsize, self.seg_dict[segsize]))
return (segsize, now, now_max)
# Returns the number of segments of the specified vma size (so the size
# will be passed through the vmasize_to_segsize function first). Pass
# vmasize == -1 to get the total number of segments of any size (the sum
# of all of the now values in the dict).
def count_now(self, vmasize):
tag = "{0}.count_now".format(self.tag)
if vmasize == -1:
total = 0
for (now, now_max, ever) in self.seg_dict.values():
total += now
return total
else:
try:
				# seg_dict values are (now, now_max, ever) tuples; return
				# the current ("now") count for this segment size.
				(count, _, _) = self.seg_dict[self.vmasize_to_segsize(vmasize)]
except KeyError:
count = 0
return count
# now_or_ever should be "now" or "ever". plot_pdf is optional.
# now_max is plotted on the same plot as "now".
def plot(self, now_or_ever, plot_fname, title, plot_pdf):
tag = "{0}.plot".format(self.tag)
inefficient_segset = dict()
for (key, value) in self.seg_dict.items():
segsize = key
(now, now_max, ever) = value
if now_or_ever == "now":
inefficient_segset[segsize] = (now, now_max)
elif now_or_ever == "ever":
# segset_to_plot() was initially written assuming two bars
# for now and now_max... for now, just use ever count twice.
inefficient_segset[segsize] = (ever, ever)
else:
print_error_exit(tag, ("invalid now_or_ever: {0}").format(
now_or_ever))
segset_to_plot(inefficient_segset, plot_fname, title, plot_pdf)
return
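	# Illustrative lifecycle sketch (an assumption about the callers, not
	# code from them): a tracker would typically call add_to_now() and
	# add_to_ever() when a vma of a given size is mapped, remove_from_now()
	# when it is unmapped, and count_now(-1) to sample the number of live
	# segments, e.g. with nextpowerof2 as the vmasize_to_segsize function:
	#   segs = segment_set(nextpowerof2)
	#   segs.add_to_now(vma.length); segs.add_to_ever(vma.length)
	#   ...
	#   segs.remove_from_now(vma.length)
	#   total_live = segs.count_now(-1)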
#############################################################################
def add_to_stats_dict(stats, key, n):
tag = "add_to_stats_dict"
try:
stats[key] += n
except KeyError:
#print_debug(tag, ("adding key {0} to stats dict").format(key))
stats[key] = n
print_debug(tag, ("incremented {0} count: {1}").format(
key, stats[key]))
return
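# e.g. (illustrative): building up per-event counters with add_to_stats_dict:
#   stats = dict()
#   add_to_stats_dict(stats, 'mmap', 1)
#   add_to_stats_dict(stats, 'mmap', 1)   # stats['mmap'] is now 2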
def stats_to_str(stats):
tag = "stats_to_str"
if not stats:
return "(stats is None)"
s = []
for key in sorted(stats.keys()):
value = stats[key]
s.append(("{0}:\t{1}").format(key, value))
s = "\n".join(s)
return s
def context_to_str(context):
tag = "context_to_str"
if not context:
return "(context is None)"
ctx_str = []
for key in sorted(context.keys()):
value = context[key]
try:
hexval = hex(value)
ctx_str.append(("\t[{0}:\t{1} ({2})]").format(
key, value, hexval))
except TypeError:
ctx_str.append(("\t[{0}:\t{1}]").format(
key, value))
ctx_str = "\n".join(ctx_str)
return ctx_str
def segset_to_str(segset):
tag = "segset_to_str"
if not segset:
return "(segset is None)"
total_count = 0
total_max = 0
s = ["\tsegment-size\tcount\tmaxcount"]
for key in sorted(segset.keys()):
(count, maxcount) = segset[key]
total_count += count
total_max += maxcount
s.append(("\t{0}\t{1}\t{2}").format(key, count, maxcount))
s.insert(1, ("\t{0}\t{1}\t{2}").format("TOTAL", total_count, total_max))
s = "\n".join(s)
return s
def vmatable_to_str(vmatable):
tag = "vmatable_to_str"
if not vmatable:
return "(vmatable is None)"
s = ["\tstart_addr:\tmapping-size\tperms-key\tseg-size"]
for key in sorted(vmatable.keys()):
entry = vmatable[key]
if entry.start_addr != key: # sanity check
print_error_exit(tag, ("segment table is inconsistent: key is "
"{0}, but entry.start_addr is {1}!").format(hex(key),
hex(entry.start_addr)))
s.append(("\t{0}:\t{1}\t{2}\t{3}").format(
hex(entry.start_addr), entry.length, entry.perms_key,
entry.seg_size))
s = "\n".join(s)
return s
def segset_count(segset):
tag = "segset_count"
count = 0
for (segcount, maxcount) in segset.values():
count += segcount
return count
def vmatable_count(vmatable):
tag = "vmatable_count"
return len(vmatable)
# Adds the second segset to the first segset. This method does not make
# a copy of the dict first, so the first segset is modified in place.
def segset_accumulate(dst, src):
tag = "segset_accumulate"
print_debug(tag, ("input src:\n{0}".format(segset_to_str(src))))
print_debug(tag, ("input dst:\n{0}".format(segset_to_str(dst))))
for segsize in sorted(src.keys()):
(src_count, src_maxcount) = src[segsize]
try:
(dst_count, dst_maxcount) = dst[segsize]
dst[segsize] = (dst_count + src_count,
dst_maxcount + src_maxcount)
except KeyError:
dst[segsize] = (src_count, src_maxcount)
print_debug(tag, ("accumulated segset:\n{0}".format(segset_to_str(dst))))
return dst
# Adds a segment of the specified segsize to the segset dict.
def segset_append(segset, segsize):
tag = "segset_insert"
try:
(count, maxcount) = segset[segsize]
count = count + 1
if count > maxcount:
maxcount = count
except KeyError:
count = 1
maxcount = 1
segset[segsize] = (count, maxcount)
return
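# Illustrative helper (not part of the original module): build a segset from
# an iterable of vma sizes, using the same vmasize_to_segsize contract as
# segment_set and vmatable_startkey_to_segset below.
def example_build_segset(vmasizes, vmasize_to_segsize):
	segset = dict()
	for size in vmasizes:
		segset_append(segset, vmasize_to_segsize(size))
	return segset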
# A vmatable is a dictionary whose keys are start-addresses and whose values
# are vm_mapping objects. A segset is a dictionary whose keys are segment
# sizes and whose values are tuples of the form (num-segments,
# max-num-segments).
def vmatable_startkey_to_segset(vmatable, vmasize_to_segsize):
tag = "vmatable_startkey_to_segset"
segset = dict()
for (start_addr, vma) in vmatable.items():
segsize = vmasize_to_segsize(vma.length)
segset_append(segset, segsize)
return segset
# Returns the smallest power of 2 that is greater than or equal to n,
# starting from the minimum segment size (values smaller than
# min_segment_size map to min_segment_size). If n itself is a power of 2,
# then n will be returned.
def nextpowerof2(n):
p = min_segment_size
while p < n:
p *= 2
return p
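# e.g. (assuming min_segment_size == 4096): nextpowerof2(1) == 4096,
# nextpowerof2(4096) == 4096, nextpowerof2(4097) == 8192.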
if __name__ == '__main__':
print_error_exit("not an executable module")
|
bsd-3-clause
|
clemkoa/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting.py
|
6
|
44811
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors", Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_almost_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_almost_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
if Cls is GradientBoostingRegressor:
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
else:
# Random state is preserved and hence predict_proba must also be
# same
assert_array_equal(est_ws.predict(X), est.predict(X))
assert_array_almost_equal(est_ws.predict_proba(X),
est.predict_proba(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor, GradientBoostingClassifier]
for GBEstimator in all_estimators:
est = GBEstimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_.flat:
assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor, GradientBoostingClassifier]
for GBEstimator in all_estimators:
est = GBEstimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_.flat:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert_equal(tree.min_impurity_decrease, 0.1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))
if EstimatorClass is GradientBoostingClassifier:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
assert_array_almost_equal(sparse.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(dense.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(
np.array(sparse.staged_decision_function(X_sparse)),
np.array(sparse.staged_decision_function(X)))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
def test_gradient_boosting_early_stopping():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=1000,
n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
random_state=42)
gbr = GradientBoostingRegressor(n_estimators=1000, n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
# Check if early_stopping works as expected
for est, tol, early_stop_n_estimators in ((gbc, 1e-1, 24), (gbr, 1e-1, 13),
(gbc, 1e-3, 36),
(gbr, 1e-3, 28)):
est.set_params(tol=tol)
est.fit(X_train, y_train)
assert_equal(est.n_estimators_, early_stop_n_estimators)
assert est.score(X_test, y_test) > 0.7
# Without early stopping
gbc = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
max_depth=3, random_state=42)
gbc.fit(X, y)
gbr = GradientBoostingRegressor(n_estimators=200, learning_rate=0.1,
max_depth=3, random_state=42)
gbr.fit(X, y)
assert gbc.n_estimators_ == 100
assert gbr.n_estimators_ == 200
def test_gradient_boosting_validation_fraction():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=100,
n_iter_no_change=10,
validation_fraction=0.1,
learning_rate=0.1, max_depth=3,
random_state=42)
gbc2 = clone(gbc).set_params(validation_fraction=0.3)
gbc3 = clone(gbc).set_params(n_iter_no_change=20)
gbr = GradientBoostingRegressor(n_estimators=100, n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
validation_fraction=0.1,
random_state=42)
gbr2 = clone(gbr).set_params(validation_fraction=0.3)
gbr3 = clone(gbr).set_params(n_iter_no_change=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Check if validation_fraction has an effect
gbc.fit(X_train, y_train)
gbc2.fit(X_train, y_train)
assert gbc.n_estimators_ != gbc2.n_estimators_
gbr.fit(X_train, y_train)
gbr2.fit(X_train, y_train)
assert gbr.n_estimators_ != gbr2.n_estimators_
# Check if n_estimators_ increase monotonically with n_iter_no_change
# Set validation
gbc3.fit(X_train, y_train)
gbr3.fit(X_train, y_train)
assert gbr.n_estimators_ < gbr3.n_estimators_
assert gbc.n_estimators_ < gbc3.n_estimators_
|
bsd-3-clause
|
phobson/statsmodels
|
statsmodels/datasets/fair/data.py
|
1
|
3081
|
#! /usr/bin/env python
"""Fair's Extramarital Affairs Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Included with permission of the author."""
TITLE = """Affairs dataset"""
SOURCE = """
Fair, Ray. 1978. "A Theory of Extramarital Affairs," `Journal of Political
Economy`, February, 45-61.
The data is available at http://fairmodel.econ.yale.edu/rayfair/pdf/2011b.htm
"""
DESCRSHORT = """Extramarital affair data."""
DESCRLONG = """Extramarital affair data used to explain the allocation
of an individual's time among work, time spent with a spouse, and time
spent with a paramour. The data is used as an example of regression
with censored data."""
#suggested notes
NOTE = """::
Number of observations: 6366
Number of variables: 9
Variable name definitions:
rate_marriage : How rate marriage, 1 = very poor, 2 = poor, 3 = fair,
4 = good, 5 = very good
age : Age
yrs_married : No. years married. Interval approximations. See
original paper for detailed explanation.
children : No. children
religious : How religious, 1 = not, 2 = mildly, 3 = fairly,
4 = strongly
educ : Level of education, 9 = grade school, 12 = high
school, 14 = some college, 16 = college graduate,
17 = some graduate school, 20 = advanced degree
occupation : 1 = student, 2 = farming, agriculture; semi-skilled,
or unskilled worker; 3 = white-collar; 4 = teacher
counselor social worker, nurse; artist, writers;
technician, skilled worker, 5 = managerial,
administrative, business, 6 = professional with
advanced degree
occupation_husb : Husband's occupation. Same as occupation.
affairs : measure of time spent in extramarital affairs
See the original paper for more details.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=8, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=8, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
with open(filepath + '/fair.csv', 'rb') as f:
data = np.recfromtxt(f, delimiter=",", names=True, dtype=float)
return data
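# Illustrative usage sketch: load the data with the pandas loader above and fit
# a simple model. The OLS model below is only an assumed example for
# demonstration; it is not Fair's original censored-regression analysis.
if __name__ == "__main__":
    import statsmodels.api as sm

    dataset = load_pandas()
    exog = sm.add_constant(dataset.exog, prepend=True)  # add an intercept column
    results = sm.OLS(dataset.endog, exog).fit()  # endog is the `affairs` measure
    print(results.summary())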
|
bsd-3-clause
|
ph1l/ocemr
|
ocemr/views/reports.py
|
1
|
43418
|
##########################################################################
#
# This file is part of OCEMR.
#
# OCEMR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OCEMR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OCEMR. If not, see <http://www.gnu.org/licenses/>.
#
#
#########################################################################
# Copyright 2011-8 Philip Freeman <[email protected]>
##########################################################################
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.db.models import Q
from datetime import datetime, timedelta
def dump_table(request,field_names,headers,data_rows):
"""
"""
out_txt="<TABLE>\n"
out_txt += "<TR>"
for f in field_names:
out_txt += "<TH>" + headers[f]
out_txt += "</TR>\n"
for r in data_rows:
out_txt += "<TR CLASS=results>"
for f in field_names:
out_txt += "<TD>%s</TD>"%r[f]
out_txt += "</TR>\n"
out_txt += "</TABLE>\n"
return render(request, 'popup_table.html', {'table': out_txt})
def dump_csv(filename,field_names,headers,data_rows):
"""
return dump_csv(
"filename.csv",
["field_one","field_two"],
{'field_one': "Field One", 'field_two': "Field Two"},
(
{'field_one': "data r1c1", 'field_two': "data r1c2"},
{'field_one': "data r2c1", 'field_two': "data r2c2"},
...
)
)
dump_csv - given a set of data provide a csv file for download
"""
out=[]
row=[]
if headers:
for field in field_names:
row.append(headers[field])
out.append(row)
for data_row in data_rows:
row=[]
for field in field_names:
if data_row.has_key(field):
row.append(data_row[field])
else:
row.append(None)
out.append(row)
import csv, StringIO
from wsgiref.util import FileWrapper
temp=StringIO.StringIO()
out_writer = csv.writer(temp,dialect='excel')
out_writer.writerows(out)
response = HttpResponse(temp.getvalue(),content_type='text/csv')
response['Content-Length'] = len(temp.getvalue())
temp.close()
response['Content-Disposition'] = 'attachment; filename=%s' % (filename)
return response
def dump_graph_pie(title,labels,data):
"""
"""
#
total = 0
other = 0
for x in data:
total += x
todelete=[]
for i in range(0,len(labels)):
if float(data[i])/float(total) <= .02:
other += data[i]
todelete.append(i)
todelete.reverse()
for i in todelete:
labels.pop(i)
data.pop(i)
if other > 0:
labels.append('Other ( < 2% )')
data.append(other)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5,5),dpi=75)
fig.interactive = False
plt.pie(data, labels=labels, autopct='%1.1f%%', shadow=True)
plt.title(title, bbox={'facecolor':'0.8', 'pad':5})
plt.draw()
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
response = HttpResponse(content_type='image/png')
canvas.print_png(response)
matplotlib.pyplot.close(fig)
return response
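# Illustrative sketch: a standalone version of the "merge slices of 2% or less
# into an 'Other' bucket" step used by dump_graph_pie() above, shown on
# hypothetical tally numbers.
def _collapse_small_slices(labels, data, threshold=0.02):
    """Return (labels, data) with slices at or below `threshold` merged into 'Other'."""
    total = float(sum(data))
    kept_labels, kept_data, other = [], [], 0
    for label, value in zip(labels, data):
        if value / total <= threshold:
            other += value
        else:
            kept_labels.append(label)
            kept_data.append(value)
    if other > 0:
        kept_labels.append('Other ( < 2% )')
        kept_data.append(other)
    return kept_labels, kept_data

# Example with hypothetical counts: the 1-count slice is folded into 'Other'.
# _collapse_small_slices(['Malaria', 'UTI', 'Rare'], [70, 29, 1])
# -> (['Malaria', 'UTI', 'Other ( < 2% )'], [70, 29, 1])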
def yearsago(years, from_date=None):
if from_date is None:
from_date = datetime.now()
try:
return from_date.replace(year=from_date.year - years)
except ValueError:
# Must be 2/29!
return from_date.replace(month=2, day=28,
year=from_date.year-years)
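# Illustrative check of the leap-day fallback above (assumed values):
#   yearsago(1, datetime(2012, 2, 29)) -> datetime(2011, 2, 28, 0, 0)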
@login_required
def index(request):
"""
Reports Landing Page
"""
return render(request, 'reports.html')
@login_required
def lab_tally(request):
"""
"""
from ocemr.forms import TallyReportForm
form_valid=0
if request.method == 'POST':
form = TallyReportForm(request.POST)
if form.is_valid():
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
dump_type = form.cleaned_data['dump_type']
form_valid=1
else:
form = TallyReportForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Date For Report',
'form_action': '/reports/lab/tally/',
'form': form,
})
dt_start = datetime(date_start_in.year,date_start_in.month,date_start_in.day,0,0,0)
dt_end = datetime(date_end_in.year,date_end_in.month,date_end_in.day,23,59,59)
q_this_day = (Q(orderedDateTime__gte=dt_start) & Q(orderedDateTime__lte=dt_end)) & (Q(status="CAN") | Q(status="COM")| Q(status="FAI"))
from ocemr.models import Lab
days_labs = Lab.objects.filter(q_this_day)
ordered={}
canceled={}
complete={}
failed={}
for l in days_labs:
if l.type.title not in ordered.keys():
ordered[l.type.title] = 0
canceled[l.type.title] = 0
complete[l.type.title] = 0
failed[l.type.title] = 0
ordered[l.type.title] += 1
if l.status == "COM":
complete[l.type.title] += 1
elif l.status == "CAN":
canceled[l.type.title] += 1
elif l.status == "FAI":
failed[l.type.title] += 1
sorted_keys=sorted(ordered,key=ordered.__getitem__,reverse=True)
if dump_type == "G_PIE":
title="Lab Tally %s -> %s"%(dt_start.strftime("%Y-%m-%d"),dt_end.strftime("%Y-%m-%d"))
labels=[]
data = []
for key in sorted_keys:
labels.append(key)
data.append(ordered[key])
return dump_graph_pie(title, labels, data)
summary_rows=[]
field_names=[ 'lab', 'ordered', 'complete', 'canceled', 'failed' ]
headers={
'lab': 'Lab Type',
'ordered': 'Number Ordered',
'complete': 'Number Complete',
'canceled': 'Number Canceled',
'failed': 'Number Failed',
}
for l in sorted_keys:
summary_rows.append(
{
'lab': l,
'ordered': ordered[l],
'complete': complete[l],
'canceled': canceled[l],
'failed': failed[l],
})
if dump_type == "CSV":
return dump_csv( "lab-tally-%s-%s.csv"%(dt_start.strftime("%Y%m%d"),dt_end.strftime("%Y%m%d")), field_names, headers, summary_rows )
elif dump_type == "TABLE":
return dump_table(request, field_names, headers, summary_rows )
@login_required
def med_tally(request):
"""
"""
from ocemr.forms import TallyReportForm
form_valid=0
if request.method == 'POST':
form = TallyReportForm(request.POST)
if form.is_valid():
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
dump_type = form.cleaned_data['dump_type']
form_valid=1
else:
form = TallyReportForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Date For Report',
'form_action': '/reports/med/tally/',
'form': form,
})
dt_start = datetime(date_start_in.year,date_start_in.month,date_start_in.day,0,0,0)
dt_end = datetime(date_end_in.year,date_end_in.month,date_end_in.day,23,59,59)
q_this_day = (Q(addedDateTime__gte=dt_start) & Q(addedDateTime__lte=dt_end)) & (Q(status="DIS") | Q(status="SUB")| Q(status="CAN"))
from ocemr.models import Med
days_meds = Med.objects.filter(q_this_day)
daily_index=0
ordered={}
dispensed={}
substituted={}
canceled={}
for m in days_meds:
if m.type.title not in ordered.keys():
ordered[m.type.title] = 0
dispensed[m.type.title] = 0
substituted[m.type.title] = 0
canceled[m.type.title] = 0
ordered[m.type.title] += 1
if m.status == "DIS":
dispensed[m.type.title] += 1
elif m.status == "SUB":
substituted[m.type.title] += 1
elif m.status == "CAN":
canceled[m.type.title] += 1
sorted_keys=sorted(ordered,key=ordered.__getitem__,reverse=True)
if dump_type == "G_PIE":
title="Med Tally %s -> %s"%(dt_start.strftime("%Y-%m-%d"),dt_end.strftime("%Y-%m-%d"))
labels=[]
data = []
for key in sorted_keys:
labels.append(key)
data.append(ordered[key])
return dump_graph_pie(title, labels, data)
summary_rows=[]
field_names=[ 'med', 'ord', 'dis', 'sub', 'can' ]
headers={
'med': 'Med Type',
'ord': 'Number Ordered',
'dis': 'Number Dispensed',
'sub': 'Number Substituted',
'can': 'Number Canceled',
}
for m in sorted_keys:
summary_rows.append(
{
'med': m,
'ord': ordered[m],
'dis': dispensed[m],
'sub': substituted[m],
'can': canceled[m],
})
if dump_type == "CSV":
return dump_csv( "med-tally-%s-%s.csv"%(dt_start.strftime("%Y%m%d"),dt_end.strftime("%Y%m%d")), field_names, headers, summary_rows )
elif dump_type == "TABLE":
return dump_table(request, field_names, headers, summary_rows )
@login_required
def village_tally(request):
"""
"""
from ocemr.forms import VillageTallyReportForm
form_valid=0
if request.method == 'POST':
form = VillageTallyReportForm(request.POST)
if form.is_valid():
dump_type = form.cleaned_data['dump_type']
form_valid=1
else:
form = VillageTallyReportForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter type For Report',
'form_action': '/reports/village/tally/',
'form': form,
})
from ocemr.models import Patient
patients = Patient.objects.all()
totals={}
for p in patients:
if p.village.name not in totals.keys():
totals[p.village.name] = 0
totals[p.village.name] += 1
sorted_keys=sorted(totals,key=totals.__getitem__,reverse=True)
if dump_type == "G_PIE":
title="Village Tally"
labels=[]
data = []
for key in sorted_keys:
labels.append(key)
data.append(totals[key])
return dump_graph_pie(title, labels, data)
summary_rows=[]
field_names=[ 'village', 'num_patients', ]
headers={ 'village': 'Village', 'num_patients': 'Number of Patients', }
for village in sorted_keys:
summary_rows.append({'village':village, 'num_patients':totals[village]})
if dump_type == "CSV":
return dump_csv( "village-tally-%s.csv"%(datetime.now().strftime("%Y%m%d")), field_names, headers, summary_rows )
elif dump_type == "TABLE":
return dump_table(request, field_names, headers, summary_rows )
@login_required
def clinician_tally(request):
"""
"""
from ocemr.forms import TallyReportForm
form_valid=0
if request.method == 'POST':
form = TallyReportForm(request.POST)
if form.is_valid():
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
dump_type = form.cleaned_data['dump_type']
form_valid=1
else:
form = TallyReportForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Date For Report',
'form_action': '/reports/clinician/tally/',
'form': form,
})
from ocemr.models import Visit
dt_start = datetime(date_start_in.year,date_start_in.month,date_start_in.day,0,0,0)
dt_end = datetime(date_end_in.year,date_end_in.month,date_end_in.day,23,59,59)
q_this_day = (Q(finishedDateTime__gte=dt_start) & Q(finishedDateTime__lte=dt_end)) & (Q(status="CHOT") | Q(status="RESO"))
days_visits = Visit.objects.filter(q_this_day)
daily_index=0
totals={}
for v in days_visits:
if v.finishedBy not in totals.keys():
totals[v.finishedBy] = 0
totals[v.finishedBy] += 1
sorted_keys=sorted(totals,key=totals.__getitem__,reverse=True)
if dump_type == "G_PIE":
title="Clinician Visit Tally %s -> %s"%(dt_start.strftime("%Y-%m-%d"),dt_end.strftime("%Y-%m-%d"))
labels=[]
data = []
for key in sorted_keys:
labels.append(key)
data.append(totals[key])
return dump_graph_pie(title, labels, data)
summary_rows=[]
field_names=[ 'clinician', 'num_patients', ]
headers={ 'clinician': 'Clinician', 'num_patients': 'Number of Patients', }
for clinician in sorted_keys:
summary_rows.append({'clinician':clinician, 'num_patients':totals[clinician]})
if dump_type == "CSV":
return dump_csv( "clinician-visit-tally-%s-%s.csv"%(dt_start.strftime("%Y%m%d"),dt_end.strftime("%Y%m%d")), field_names, headers, summary_rows )
elif dump_type == "TABLE":
return dump_table(request, field_names, headers, summary_rows )
@login_required
def diagnosis_tally(request):
"""
"""
from ocemr.forms import DiagnosisTallyReportForm
form_valid=0
if request.method == 'POST':
form = DiagnosisTallyReportForm(request.POST)
if form.is_valid():
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
age_min = form.cleaned_data['age_min']
age_max = form.cleaned_data['age_max']
dump_type = form.cleaned_data['dump_type']
form_valid=1
else:
form = DiagnosisTallyReportForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Date Range For Report',
'form_action': '/reports/diagnosis/tally/',
'form': form,
})
dt_start = datetime(
date_start_in.year,date_start_in.month,date_start_in.day,
0,0,0
)
dt_end = datetime(
date_end_in.year,date_end_in.month,date_end_in.day,
23,59,59
)
#(Q(finishedDateTime__gte=dt_start) & Q(finishedDateTime__lte=dt_end))
field_names=[
'diag',
'tally',
]
headers={
'diag': 'Diagnosis',
'tally': 'Tally',
}
from ocemr.models import Visit, Diagnosis
q_this_day = ( Q(finishedDateTime__gte=dt_start) & Q(finishedDateTime__lte=dt_end) ) & (Q(status="CHOT") | Q(status="RESO"))
days_visits = Visit.objects.filter(q_this_day)
currentYear = datetime.now().year
if age_min != None:
maxYear = currentYear-age_min
q_age_range = Q(patient__birthYear__lte=maxYear)
days_visits = days_visits.filter(q_age_range)
if age_max != None:
minYear = currentYear-age_max
q_age_range = Q(patient__birthYear__gte=minYear)
days_visits = days_visits.filter(q_age_range)
q_dignosis_active = (Q(status="NEW") | Q(status="FOL"))
s={}
num_visits=0
for v in days_visits:
num_visits += 1
for d in Diagnosis.objects.filter(visit=v).filter(q_dignosis_active):
diagnosis = d.type.title
if diagnosis not in s.keys():
s[diagnosis]=1
else:
s[diagnosis] += 1
sorted_keys=sorted(s, key=s.get)
sorted_keys.reverse()
if dump_type == "G_PIE":
title="Diagnosis Tally %s -> %s"%(dt_start.strftime("%Y-%m-%d"),dt_end.strftime("%Y-%m-%d"))
labels=[]
data = []
for key in sorted_keys:
labels.append(key)
data.append(s[key])
return dump_graph_pie(title, labels, data)
summary_rows=[]
summary_rows.append({'diag':'Dates:', 'tally': "%s-%s-%s -> %s-%s-%s"%(
dt_start.day, dt_start.month, dt_start.year,
dt_end.day, dt_end.month, dt_end.year,
)} )
summary_rows.append({'diag':'Total Patients:', 'tally':num_visits})
summary_rows.append({'diag':'', 'tally':''})
summary_rows.append({'diag':'Diagnosis', 'tally':'Tally'})
for key in sorted_keys:
summary_rows.append({'diag': key, 'tally': s[key]})
if dump_type == "CSV":
return dump_csv( "diagnosis-tally-%s-%s.csv"%(dt_start.strftime("%Y%m%d"),dt_end.strftime("%Y%m%d")), field_names, headers, summary_rows )
elif dump_type == "TABLE":
return dump_table(request, field_names, headers, summary_rows )
else:
raise "Invalid Dump Type"
@login_required
def legacy_patient_daily(request):
"""
"""
from ocemr.forms import SelectDateForm
form_valid=0
if request.method == 'POST':
form = SelectDateForm(request.POST)
if form.is_valid():
date_in = form.cleaned_data['date']
form_valid=1
else:
form = SelectDateForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Date For Report',
'form_action': '/reports/legacy/patient/daily/',
'form': form,
})
from ocemr.models import Visit, Diagnosis, Med, Referral
field_names=[
'pt_daily_index',
'pt_name',
'pt_monthly_index',
'sex',
'age',
'village',
'diagnosis',
'prescription',
'referral'
]
headers={
'pt_daily_index': 'Pt # of Day',
'pt_name': 'Patient Name',
'pt_monthly_index': 'Pt # of Month',
'sex': 'Sex',
'age': 'Age',
'village': 'Village',
'diagnosis': 'Diagnosis',
'prescription': 'Prescription',
'referral': 'Referral',
}
if date_in.month == 12:
next_month=1
next_year=date_in.year+1
else:
next_month=date_in.month+1
next_year=date_in.year
dt_month_start = datetime(date_in.year,date_in.month,1,0,0,0)
dt_month_end = datetime(next_year,next_month,1,0,0,0)
dt_start = datetime(date_in.year,date_in.month,date_in.day,0,0,0)
dt_end = datetime(date_in.year,date_in.month,date_in.day,23,59,59)
q_this_month = ( Q(finishedDateTime__gte=dt_month_start) &
Q(finishedDateTime__lt=dt_month_end ) &
Q(finishedDateTime__lt=dt_start)
) & (Q(status="CHOT") | Q(status="RESO"))
q_this_day = ( Q(finishedDateTime__gte=dt_start) & Q(finishedDateTime__lte=dt_end ) ) & (Q(status="CHOT") | Q(status="RESO"))
months_visits = Visit.objects.filter(q_this_month)
pt_monthly_index = len(months_visits)
days_visits = Visit.objects.filter(q_this_day)
q_dignosis_active = (Q(status="NEW") | Q(status="FOL"))
daily_index=0
summary_rows=[]
for v in days_visits:
daily_index += 1
pt_monthly_index += 1
referral = ""
for r in Referral.objects.filter(visit=v):
referral += "%s - %s; "%(r.to,r.reason)
summary_rows.append( {
'pt_daily_index': daily_index,
'pt_name': v.patient.fullName,
'pt_monthly_index': pt_monthly_index,
'sex': v.patient.gender,
'age': v.patient.age,
'village': v.patient.village.name,
'diagnosis': '',
'prescription': '',
'referral': referral,
})
for d in Diagnosis.objects.filter(visit=v).filter(q_dignosis_active):
diagnosis = d.type.title
summary_rows.append( {
'pt_daily_index': '',
'pt_name': '',
'pt_monthly_index': '',
'sex': '',
'age': '',
'village': '',
'diagnosis': diagnosis,
'prescription': '',
'referral': '',
})
for m in Med.objects.filter(diagnosis=d,status='DIS'):
prescription = m.type.title
summary_rows.append( {
'pt_daily_index': '',
'pt_name': '',
'pt_monthly_index': '',
'sex': '',
'age': '',
'village': '',
'diagnosis': diagnosis,
'prescription': prescription,
'referral': '',
})
return dump_csv( "patient-daily-%s.csv"%(date_in.strftime("%Y%m%d")), field_names, headers, summary_rows )
@login_required
def cashflow(request):
"""
"""
if not request.user.is_staff:
return HttpResponse( "Permission Denied." )
from ocemr.forms import SelectDateRangeForm
form_valid=0
if request.method == 'POST':
form = SelectDateRangeForm(request.POST)
if form.is_valid():
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
form_valid=1
else:
form = SelectDateRangeForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Date Range For Report',
'form_action': '/reports/cashflow/',
'form': form,
})
field_names=[
'date',
'totbill',
'totcoll',
'diff',
]
headers={
'date': 'Date',
'totbill': 'Total Billed',
'totcoll': 'Total Collected',
'diff': 'Total Difference',
}
summary_rows=[]
from ocemr.models import Visit, CashLog
curdate = date_start_in
total_billed = 0
total_collected = 0
while curdate <= date_end_in:
billed=0
collected=0
dt_start = datetime(curdate.year,curdate.month,curdate.day,0,0,0)
dt_end = datetime(curdate.year,curdate.month,curdate.day,23,59,59)
for v in Visit.objects.filter(finishedDateTime__gte=dt_start,finishedDateTime__lte=dt_end):
billed += v.cost
for c in CashLog.objects.filter(addedDateTime__gte=dt_start,addedDateTime__lte=dt_end):
collected += c.amount
summary_rows.append({'date':curdate,'totbill':billed,'totcoll':collected,'diff':billed-collected})
total_billed += billed
total_collected += collected
curdate = curdate + timedelta(1)
summary_rows.append({'date':'Total','totbill':total_billed, 'totcoll':total_collected,'diff':total_billed-total_collected})
return dump_csv( "cashflow-%s-%s.csv"%(date_start_in.strftime("%Y%m%d"), date_end_in.strftime("%Y%m%d")), field_names, headers, summary_rows )
@login_required
def accounts_outstanding(request):
"""
"""
from ocemr.forms import SelectDateRangeForm
form_valid=0
if request.method == 'POST':
form = SelectDateRangeForm(request.POST)
if form.is_valid():
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
form_valid=1
else:
form = SelectDateRangeForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Date Range For Report',
'form_action': '/reports/accounts_outstanding/',
'form': form,
})
from ocemr.models import Visit, CashLog
field_names=[
'patient',
'billed',
'collected',
'owed',
]
headers={
'patient': 'Patient',
'billed': 'Total Billed',
'collected': 'Total Collected',
'owed': 'Amount Owed',
}
dt_start = datetime(date_start_in.year,date_start_in.month,date_start_in.day,0,0,0)
dt_end = datetime(date_end_in.year,date_end_in.month,date_end_in.day,23,59,59)
summary_rows=[]
for v in Visit.objects.filter(finishedDateTime__gte=dt_start,finishedDateTime__lte=dt_end):
billed=v.cost
collected=0
for c in CashLog.objects.filter(visit=v):
collected += c.amount
if collected < v.cost:
existing = filter(lambda person: person['patient'] == v.patient, summary_rows)
if existing:
summary_rows.remove(existing[0])
billed += existing[0]['billed']
collected += existing[0]['collected']
summary_rows.append({'patient': v.patient, 'billed': billed, 'collected':collected, 'owed':billed-collected})
return dump_csv( "outstanding_accounts-%s-%s.csv"%(date_start_in.strftime("%Y%m%d"), date_end_in.strftime("%Y%m%d")),field_names, headers, summary_rows )
@login_required
def diagnosis_patient(request):
"""
"""
from ocemr.forms import DiagnosisPatientReportForm
form_valid=0
if request.method == 'POST':
form = DiagnosisPatientReportForm(request.POST)
if form.is_valid():
diagnosis_types = form.cleaned_data['diagnosis']
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
age_min = form.cleaned_data['age_min']
age_max = form.cleaned_data['age_max']
dump_type = form.cleaned_data['dump_type']
form_valid=1
else:
form = DiagnosisPatientReportForm()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Details For Report',
'form_action': '/reports/diagnosis_patient/',
'form': form,
})
dt_start = datetime(
date_start_in.year,date_start_in.month,date_start_in.day,
0,0,0
)
dt_end = datetime(
date_end_in.year,date_end_in.month,date_end_in.day,
23,59,59
)
#(Q(finishedDateTime__gte=dt_start) & Q(finishedDateTime__lte=dt_end))
field_names=[
'pid',
'pname',
'diag',
'diagdate',
]
headers={
'pid': 'Id',
'pname': 'Name',
'diag': 'Diagnosis',
'diagdate': 'Diagnosis Date',
}
from ocemr.models import Diagnosis
q = ( Q(diagnosedDateTime__gte=dt_start) & Q(diagnosedDateTime__lte=dt_end) ) & Q(status="NEW")
diags = Diagnosis.objects.filter(q)
subq=Q(type__in=diagnosis_types)
diags = diags.filter( subq )
currentYear = datetime.now().year
if age_min != None:
maxYear = currentYear-age_min
q_age_range = Q(patient__birthYear__lte=maxYear)
diags = diags.filter(q_age_range)
if age_max != None:
minYear = currentYear-age_max
q_age_range = Q(patient__birthYear__gte=minYear)
diags = diags.filter(q_age_range)
summary_rows=[]
summary_rows.append(headers)
for d in diags:
summary_rows.append({
'pid': d.patient.id,
'pname': d.patient,
'diag': d.type,
'diagdate': d.diagnosedDateTime,
})
if dump_type == "CSV":
return dump_csv( "diagnosis-patient-%s-%s.csv"%(dt_start.strftime("%Y%m%d"),dt_end.strftime("%Y%m%d")), field_names, headers, summary_rows )
elif dump_type == "TABLE":
return dump_table(request, field_names, headers, summary_rows )
else:
raise "Invalid Dump Type"
@login_required
def hmis105(request):
"""
"""
from ocemr.forms import Hmis105Form
form_valid=0
if request.method == 'POST':
form = Hmis105Form(request.POST)
if form.is_valid():
date_start_in = form.cleaned_data['date_start']
if form.cleaned_data['date_end']==None:
date_end_in = form.cleaned_data['date_start']
else:
date_end_in = form.cleaned_data['date_end']
dump_type = form.cleaned_data['dump_type']
form_valid=1
else:
form = Hmis105Form()
if not form_valid:
return render(request, 'popup_form.html', {
'title': 'Enter Details For Report',
'form_action': '/reports/hmis105/',
'form': form,
})
dt_start = datetime(
date_start_in.year,date_start_in.month,date_start_in.day,
0,0,0
)
dt_end = datetime(
date_end_in.year,date_end_in.month,date_end_in.day,
23,59,59
)
#(Q(finishedDateTime__gte=dt_start) & Q(finishedDateTime__lte=dt_end))
field_names=[
'cat',
'lt28dm',
'lt28df',
'lt4m',
'lt4f',
'gt4m',
'gt4f',
'gt59m',
'gt59f',
'visit_list',
]
headers={
'cat': 'Category',
'lt28dm': '0-28 days, Male',
'lt28df': '0-28 days, Female',
'lt4m': '0-4 years, Male',
'lt4f': '0-4 years, Female',
'gt4m': '5-59 years, Male',
'gt4f': '5-59 years, Female',
'gt59m': '60+ years, Male',
'gt59f': '60+ years, Female',
'visit_list': 'Visit List',
}
from ocemr.models import Visit, Referral, Diagnosis, DiagnosisType
summary_rows=[]
new_patient_visits=[0,0,0,0,0,0,0,0]
old_patient_visits=[0,0,0,0,0,0,0,0]
total_visits=[0,0,0,0,0,0,0,0]
referrals_from=[0,0,0,0,0,0,0,0]
diagnoses = [
{ 'NAME': "1.3.1 Epidemic-Prone Diseases" },
{ 'NAME': "01 Acute flaccid paralysis", 'ICPC2': [ "N70", ], },
{ 'NAME': "02 Animal Bites (suspected rabies)", 'ICPC2': [ ], },
{ 'NAME': "03 Cholera", 'ICPC2': [ ], },
{ 'NAME': "04 Dysentery", 'ICPC2': [ "D11", ], },
{ 'NAME': "05 Guinea worm", 'ICPC2': [ ], },
{ 'NAME': "06 Malaria", 'ICPC2': [ "A73", ], },
{ 'NAME': "07 Measles", 'ICPC2': [ "A71", ], },
{ 'NAME': "08 Bacterial meningitis", 'ICPC2': [ "N71", ], },
{ 'NAME': "09 Neonatal Tetanus", 'ICPC2': [ "N72", ], },
{ 'NAME': "10 Plauge", 'ICPC2': [ ], },
{ 'NAME': "11 Yellow Fever", 'ICPC2': [ ], },
{ 'NAME': "12 Other Viral Haemorrhagic Fevers", 'ICPC2': [ ], },
{ 'NAME': "13 Severe Acute Respiratory Infection (SARI)", 'ICPC2': [ ], },
{ 'NAME': "14 Adverse Events Following Immunization (AEFI)", 'ICPC2': [ ], },
{ 'NAME': "15 Typhoid Fever", 'ICPC2': [ "A78", ], },
{ 'NAME': "16 Presumptive MDR TB cases", 'ICPC2': [ ], },
{ 'NAME': "Other Emerging Infectious Diseases , specify e.g. small pox, ILI, SARS", 'ICPC2': [ ], },
{ 'NAME': "1.3.2 Other Infectious/Communicable Diseases" },
{ 'NAME': "17 Diarrhea- Acute", 'ICPC2': [ "D11", ], },
{ 'NAME': "18 Diarrhea- Persistent", 'ICPC2': [ ], },
{ 'NAME': "19 Urethral discharges", 'ICPC2': [ "Y03", ], },
{ 'NAME': "20 Genital ulcers", 'ICPC2': [ ], },
{ 'NAME': "21 Sexually Transmitted Infection due to SGBV", 'ICPC2': [ ], },
{ 'NAME': "22 Other Sexually Transmitted Infections", 'ICPC2': [ "X70", "X71", "X72", "X73", "X74", "X90", "X91", "X92", "Y70", "Y71", "Y72", "Y73", "Y74", "Y75", "Y76" ], },
{ 'NAME': "23 Urinary Tract Infections (UTI)", 'ICPC2': [ "U70", "U71", "U72" ], },
{ 'NAME': "24 Intestinal Worms", 'ICPC2': [ "D96", ], },
{ 'NAME': "25 Hematological Meningitis", 'ICPC2': [ ], },
{ 'NAME': "26 Other types of meningitis", 'ICPC2': [ ], },
{ 'NAME': "27 No pneumonia Cough or Cold", 'ICPC2': [ "R05", "R71", "R74" ], },
{ 'NAME': "28 Pneumonia", 'ICPC2': [ "R81", ], },
{ 'NAME': "29 Skin Diseases", 'ICPC2': [ "S", ], },
{ 'NAME': "30 New TB cases diagnosed (Bacteriologically confirmed)", 'ICPC2': [ ], },
{ 'NAME': "30 New TB cases diagnosed (Clinically Diagnosed)", 'ICPC2': [ ], },
{ 'NAME': "30 New TB cases diagnosed (EPTB)", 'ICPC2': [ ], },
{ 'NAME': "31 Leprosy (Manually review S76, S99)", 'ICPC2': [ "S76", "S99" ], },
{ 'NAME': "32 Tuberculosis MDR/XDR cases started on trastment", 'ICPC2': [ ], },
{ 'NAME': "33 Tetanus (over 28 days age)", 'ICPC2': [ "N72", ], 'MIN_AGE_DAYS': 28 },
{ 'NAME': "34 Sleeping sickness", 'ICPC2': [ ], },
{ 'NAME': "35 Pelvic Inflammatory Disease (PID)", 'ICPC2': [ "X74", ], },
{ 'NAME': "36 Brucellosis", 'ICPC2': [ ], },
{ 'NAME': "1.3.3 Neonatal Diseases" },
{ 'NAME': "37 Neonatal Sepsis (0-7days)", 'ICPC2': [ ], 'MAX_AGE_DAYS': 7},
{ 'NAME': "38 Neonatal Sepsis (8-28days)", 'ICPC2': [ ], 'MIN_AGE_DAYS': 8, 'MAX_AGE_DAYS': 28},
{ 'NAME': "39 Neonatal Pneumonia", 'ICPC2': [ ], },
{ 'NAME': "40 Neonatal Meningitis", 'ICPC2': [ ], },
{ 'NAME': "41 Neonatal Jaundice", 'ICPC2': [ ], },
{ 'NAME': "42 Premature baby (as a condition for management)", 'ICPC2': [ ], },
{ 'NAME': "43 Other Neonatal Conditions", 'ICPC2': [ ], },
{ 'NAME': "1.3.4 Non Communicable Diseases/Conditions" },
{ 'NAME': "44 Sickle Cell Anaemia", 'ICPC2': [ ], },
{ 'NAME': "45 Other types of Anaemia", 'ICPC2': [ "B82", ], },
{ 'NAME': "46 Gastro-Intestinal Disorders (non-Infective) ",
'ICPC2': [ "D0", "D1", "D2", "D74", "D75", "D76", "D77",
"D78", "D79", "D8", "D9" ], },
{ 'NAME': "47 Pain Requiring Pallative Care", 'ICPC2': [ ], },
{ 'NAME': "Oral diseases" },
{ 'NAME': "48 Dental Caries", 'ICPC2': [ ], },
{ 'NAME': "49 Gingivitis", 'ICPC2': [ ], },
{ 'NAME': "50 HIV-Oral lesions", 'ICPC2': [ ], },
{ 'NAME': "51 Oral Cancers", 'ICPC2': [ ], },
{ 'NAME': "52 Other Oral Conditions", 'ICPC2': [ ], },
{ 'NAME': "ENT conditions" },
{ 'NAME': "53 Otitis media", 'ICPC2': [ "H70", "H71", "H72", "H74" ], },
{ 'NAME': "54 Hearing loss", 'ICPC2': [ "H02", "H28", "H76", "H81", "H86" ], },
{ 'NAME': "55 Other ENT conditions", 'ICPC2': [ "H", "R06", "R07", "R08", "R09", "R1", "R20", "R21", "R72", "R73", "R87", "R90", "R97" ], 'subtract': [ "53 Otitis media", "54 Hearing loss" ] },
{ 'NAME': "Eye conditions" },
{ 'NAME': "56 Ophthalmia neonatorum", 'ICPC2': [ "F03", "F70", "F73", "F80" ], 'MAX_AGE_DAYS': 21 },
{ 'NAME': "57 Cataracts", 'ICPC2': [ ], },
{ 'NAME': "58 Refractive errors", 'ICPC2': [ ], },
{ 'NAME': "59 Glaucoma", 'ICPC2': [ ], },
{ 'NAME': "60 Trachoma", 'ICPC2': [ ], },
{ 'NAME': "61 Tumors", 'ICPC2': [ ], },
{ 'NAME': "62 Blindness", 'ICPC2': [ ], },
{ 'NAME': "63 Diabetic Retinopathy", 'ICPC2': [ ], },
{ 'NAME': "64 Other Eye conditions", 'ICPC2': [ "F", ], 'subtract': [ "56 Ophthalmia neonatorum", "57 Cataracts", "58 Refractive errors", "59 Glaucoma", "60 Trachoma", "61 Tumors", "62 Blindness", "63 Diabetic Retinopathy" ] },
{ 'NAME': "Mental Health" },
{ 'NAME': "65 Bipolar disorders", 'ICPC2': [ "P73" ], },
{ 'NAME': "66 Depression", 'ICPC2': [ "P76", ], },
{ 'NAME': "67 Epilepsy", 'ICPC2': [ "N88", ], },
{ 'NAME': "68 Dementia", 'ICPC2': [ "P70", ], },
{ 'NAME': "69 Childhood Mental Disorders", 'ICPC2': [ ], },
{ 'NAME': "70 Schizophrenia", 'ICPC2': [ "P72", ], },
{ 'NAME': "71 HIV related psychosis (Manually review B90)", 'ICPC2': [ "B90", ], },
{ 'NAME': "72 Anxiety disorders", 'ICPC2': [ "P74", ], },
{ 'NAME': "73 Alcohol abuse", 'ICPC2': [ "P15", "P16" ], },
{ 'NAME': "74 Drug abuse", 'ICPC2': [ "P19", ], },
{ 'NAME': "75 Other Mental Health Conditions", 'ICPC2': [
"P71", "P72", "P73", "P74", "P75", "P76", "P77", "P78",
"P79", "P8", "P9"
], },
{ 'NAME': "Chronic respiratory diseases" },
{ 'NAME': "76 Asthma", 'ICPC2': [ "R96", ], },
{ 'NAME': "77 Chronic Obstructive Pulmonary Disease (COPD)", 'ICPC2': [ ], },
{ 'NAME': "Cancers" },
{ 'NAME': "78 Cancer Cervix", 'ICPC2': [ ], },
{ 'NAME': "79 Cancer Prostate", 'ICPC2': [ ], },
{ 'NAME': "80 Cancer Breast", 'ICPC2': [ ], },
{ 'NAME': "81 Cancer Lung", 'ICPC2': [ ], },
{ 'NAME': "82 Cancer Liver", 'ICPC2': [ ], },
{ 'NAME': "83 Cancer Colon", 'ICPC2': [ ], },
{ 'NAME': "84 Cancer Sarcoma", 'ICPC2': [ ], },
{ 'NAME': "85 Cancer Others", 'ICPC2': [ ], },
{ 'NAME': "Cardiovascular diseases" },
{ 'NAME': "86 Stroke/Cardiovascular Accident(CVA)", 'ICPC2': [ ], },
{ 'NAME': "87 Hypertension", 'ICPC2': [ "K86", "K87" ], },
{ 'NAME': "88 Heart failure", 'ICPC2': [ ], },
{ 'NAME': "89 Ischemic Heart Diseases", 'ICPC2': [ ], },
{ 'NAME': "90 Rheumatic Heart Diseases", 'ICPC2': [ ], },
{ 'NAME': "91 Other Cardiovascular Diseases", 'ICPC2': [ "K", ], 'subtract': [ "86 Stroke/Cardiovascular Accident(CVA)", "87 Hypertension", "88 Heart failure", "89 Ischemic Heart Diseases", "90 Rheumatic Heart Diseases", ] },
{ 'NAME': "Endocrine and Metabolic Disorders" },
{ 'NAME': "92 Diabetes mellitus", 'ICPC2': [ "T90", "W85" ], },
{ 'NAME': "93 Thyroid Disease", 'ICPC2': [ ], },
{ 'NAME': "94 Other Endocrine and Metabolic Diseases", 'ICPC2': [ ], },
{ 'NAME': "Malnutrition" },
{ 'NAME': "95 Severe Acute Malnutrition (SAM) With oedema", 'ICPC2': [ "T91", ], },
{ 'NAME': "95 Severe Acute Malnutrition (SAM) Without oedema", 'ICPC2': [ "T91", ], },
{ 'NAME': "96 Mild Acute Malnutrition (MAM)", 'ICPC2': [ "T91", ], },
{ 'NAME': "Injuries" },
{ 'NAME': "97 Jaw injuries", 'ICPC2': [ "L07", ], },
{ 'NAME': "98 Injuries- Road traffic Accidents (Manually review A80, A81)", 'ICPC2': [ "A80", "A81" ], },
{ 'NAME': "99 Injuries due to motorcycle(boda-boda)(Manually review A80, A81)", 'ICPC2': [ "A80", "A81" ], },
{ 'NAME': "100 Injuries due to Gender based violence (Manually review A80, A81)", 'ICPC2': [ "A80", "A81" ], },
{ 'NAME': "101 Injuries (Trauma due to other causes) (Manually review A80, A81)", 'ICPC2': [ "A80", "A81" ], },
{ 'NAME': "102 Animal bites (Domestic) (Manually review S13)", 'ICPC2': [ "S13", ], },
{ 'NAME': "102 Animal bites (Wild) (Manually review S13)", 'ICPC2': [ "S13", ], },
{ 'NAME': "102 Animal bites (Insects) (Manually review S13)", 'ICPC2': [ "S13", ], },
{ 'NAME': "103 Snake bites (Manually review S13)", 'ICPC2': [ "S13", ], },
{ 'NAME': "1.3.5 Minor Operations in OPD" },
{ 'NAME': "104 Tooth extractions", 'ICPC2': [ ], },
{ 'NAME': "105 Dental Fillings", 'ICPC2': [ ], },
{ 'NAME': "106 Other Minor Operations", 'ICPC2': [ ], },
{ 'NAME': "1.3.7 Neglegted Tropical Diseases (NTDs)"},
{ 'NAME': "107 Leishmaniasis (Manually review A78)", 'ICPC2': [ "A78", ], },
{ 'NAME': "108 Lymphatic Filariasis (hydrocele) (Manually review A78)", 'ICPC2': [ "A78", ], },
{ 'NAME': "109 Lymphatic Filariasis (Lymphoedema) (Manually review A78)", 'ICPC2': [ "A78", ], },
{ 'NAME': "110 Urinary Schistosomiasis (Manually review A78)", 'ICPC2': [ "A78", ], },
{ 'NAME': "111 Intestinal Schistosomiasis (Manually review A78)", 'ICPC2': [ "A78", ], },
{ 'NAME': "112 Onchocerciasis (Manually review A78)", 'ICPC2': [ "A78", ], },
{ 'NAME': "Maternal conditions" },
{ 'NAME': "113 Abortions due to Gender-Based Violence (GBV)", 'ICPC2': [ ], },
{ 'NAME': "114 Abortions due to other causes", 'ICPC2': [ "W82", "W83" ], },
{ 'NAME': "115 Malaria in pregnancy", 'DIAGNOSIS_TYPE': [ 18, 20 ], },
{ 'NAME': "116 High blood pressure in pregnancy", 'DIAGNOSIS_TYPE': [ 216, ], },
{ 'NAME': "117 Obstructed labour", 'ICPC2': [ ], },
{ 'NAME': "118 Puerperial Sepsis", 'ICPC2': [ "W70", ], },
{ 'NAME': "119 Haemorrhage in pregnancy (APH or PPH)", 'ICPC2': [ "W03", "W17" ], },
{ 'NAME': "Other OPD conditions" },
{ 'NAME': "120 Other diagnoses (specify priority diseases for District)", 'ICPC2': [ ], },
{ 'NAME': "121 Deaths in OPD", 'ICPC2': [ ], },
{ 'NAME': "122 All others", 'ICPC2': [ ], },
{ 'NAME': "1.3.9 RISKY BEHAVIORS" },
{ 'NAME': "1.3.10 BODY MASS INDEX (BMI)" },
]
diag_map = {}
for d in diagnoses:
diag_map[d['NAME']] = [0,0,0,0,0,0,0,0,[]]
visits = Visit.objects.filter(Q(finishedDateTime__gte=dt_start) & Q(finishedDateTime__lte=dt_end) & (Q(status="CHOT") | Q(status="RESO")) )
for v in visits:
if not v.finishedDateTime:
print "Warning: skipping unfinished visit: %s"%v
index = 0
if v.patient.birthDate:
if v.patient.birthDate > (v.finishedDateTime - timedelta(days=28)).date():
index = 0
elif v.patient.birthDate > yearsago(5, v.finishedDateTime).date():
index += 2
elif v.patient.birthDate > yearsago(60, v.finishedDateTime).date():
index += 4
else:
index += 6
else:
if v.patient.birthYear >= v.finishedDateTime.year - 4:
index += 2
elif v.patient.birthYear >= v.finishedDateTime.year - 59:
index += 4
else:
index += 6
if v.patient.gender == "F":
index += 1
total_visits[index] += 1
if Visit.objects.filter(Q(patient=v.patient) & Q(finishedDateTime__lte=dt_end)).count() > 1:
old_patient_visits[index] += 1
else:
new_patient_visits[index] += 1
referrals = Referral.objects.filter(visit=v)
referrals_from[index] += len(referrals)
for d in diagnoses:
if not d.has_key("types"):
d["types"] = []
if d.has_key('ICPC2'):
for icpc2type in d['ICPC2']:
for dt in DiagnosisType.objects.filter(Q(icpc2Code__startswith=icpc2type)):
d["types"].append(dt.id)
if d.has_key('DIAGNOSIS_TYPE'):
for id in d['DIAGNOSIS_TYPE']:
d["types"].append(id)
if d.has_key("MAX_AGE_DAYS"):
if not v.patient.birthDate:
continue
delta = v.finishedDateTime.date() - v.patient.birthDate
if delta.days > d['MAX_AGE_DAYS']:
continue
if d.has_key("MIN_AGE_DAYS"):
if not v.patient.birthDate:
if v.patient.birthYear <= v.finishedDateTime.year:
continue
else:
delta = v.finishedDateTime.date() - v.patient.birthDate
if delta.days < d['MIN_AGE_DAYS']:
continue
my_d = Diagnosis.objects.filter(Q(visit=v) & Q(status="NEW") & Q(type__in=d["types"]))
my_count = my_d.count()
if my_count > 0:
diag_map[d['NAME']][index] += my_count
diag_map[d['NAME']][8].append("<A HREF=\"#%(diag_name)s_%(visit_id)s\" onclick=\"window.opener.location.href='/visit/%(visit_id)s/plan/';\">%(visit_id)s</A>"%{'visit_id': v.id, 'diag_name': d['NAME']})
summary_rows.append({ 'cat': "1.1 Outpatient Attendance",
'lt28dm': "", 'lt28df': "",
'lt4m': "", 'lt4f': "",
'gt4m': "", 'gt4f': "",
'gt59m': "", 'gt59f': "",
'visit_list': "",
})
summary_rows.append({ 'cat': "New Attendance",
'lt28dm': new_patient_visits[0],
'lt28df': new_patient_visits[1],
'lt4m': new_patient_visits[2],
'lt4f': new_patient_visits[3],
'gt4m': new_patient_visits[4],
'gt4f': new_patient_visits[5],
'gt59m': new_patient_visits[6],
'gt59f': new_patient_visits[7],
'visit_list': "",
})
summary_rows.append({ 'cat': "Re-Attendance",
'lt28dm': old_patient_visits[0],
'lt28df': old_patient_visits[1],
'lt4m': old_patient_visits[2],
'lt4f': old_patient_visits[3],
'gt4m': old_patient_visits[4],
'gt4f': old_patient_visits[5],
'gt59m': old_patient_visits[6],
'gt59f': old_patient_visits[7],
'visit_list': "",
})
summary_rows.append({ 'cat': "Total Attendance",
'lt28dm': total_visits[0],
'lt28df': total_visits[1],
'lt4m': total_visits[2],
'lt4f': total_visits[3],
'gt4m': total_visits[4],
'gt4f': total_visits[5],
'gt59m': total_visits[6],
'gt59f': total_visits[7],
'visit_list': "",
})
summary_rows.append({ 'cat': "1.2 Outpatient Referrals",
'lt28dm': "", 'lt28df': "",
'lt4m': "", 'lt4f': "",
'gt4m': "", 'gt4f': "",
'gt59m': "", 'gt59f': "",
'visit_list': "",
})
summary_rows.append({ 'cat': "Referrals to unit",
'lt28dm': "", 'lt28df': "",
'lt4m': "-", 'lt4f': "-",
'gt4m': "-", 'gt4f': "-",
'gt59m': "-", 'gt59f': "-",
'visit_list': "",
})
summary_rows.append({ 'cat': "Referrals from unit",
'lt28dm': referrals_from[0],
'lt28df': referrals_from[1],
'lt4m': referrals_from[2],
'lt4f': referrals_from[3],
'gt4m': referrals_from[4],
'gt4f': referrals_from[5],
'gt59m': referrals_from[6],
'gt59f': referrals_from[7],
'visit_list': "",
})
summary_rows.append({ 'cat': "1.3 Outpatient Diagnoses",
'lt28dm': "", 'lt28df': "",
'lt4m': "", 'lt4f': "",
'gt4m': "", 'gt4f': "",
'gt59m': "", 'gt59f': "",
'visit_list': "",
})
for d in diagnoses:
if d.has_key("types") and len(d["types"]) == 0:
summary_rows.append({
'cat': d['NAME'],
'lt28dm': "", 'lt28df': "",
'lt4m': "-", 'lt4f': "-",
'gt4m': "-", 'gt4f': "-",
'gt59m': "-", 'gt59f': "-",
'visit_list': "-",
})
else:
if d.has_key("subtract"):
for subtraction in d['subtract']:
for i in range(0, 8):  # subtract across all eight age/sex buckets (indices 0-7)
diag_map[d['NAME']][i]-=diag_map[subtraction][i]
for v in diag_map[subtraction][8]:
if v in diag_map[d['NAME']][8]:
diag_map[d['NAME']][8].remove(v)
summary_rows.append({
'cat': d['NAME'],
'lt28dm': diag_map[d['NAME']][0],
'lt28df': diag_map[d['NAME']][1],
'lt4m': diag_map[d['NAME']][2],
'lt4f': diag_map[d['NAME']][3],
'gt4m': diag_map[d['NAME']][4],
'gt4f': diag_map[d['NAME']][5],
'gt59m': diag_map[d['NAME']][6],
'gt59f': diag_map[d['NAME']][7],
'visit_list': ", ".join(diag_map[d['NAME']][8])
})
if dump_type == "CSV":
return dump_csv( "hmis-105-%s-%s.csv"%(dt_start.strftime("%Y%m%d"),dt_end.strftime("%Y%m%d")), field_names, headers, summary_rows )
elif dump_type == "TABLE":
return dump_table(request, field_names, headers, summary_rows )
else:
raise "Invalid Dump Type"
|
gpl-3.0
|
LiaoPan/scikit-learn
|
examples/cluster/plot_kmeans_digits.py
|
230
|
4524
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
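# A quick illustration of one of the metrics listed above (toy labels, not
# taken from the digits run below): homogeneity is 1.0 whenever every
# predicted cluster contains members of a single true class, regardless of
# how the cluster ids are permuted, e.g.
#   metrics.homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0]) == 1.0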
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
mmottahedi/neuralnilm_prototype
|
scripts/e304.py
|
2
|
6109
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.0,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=False,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=partial(scaled_cost_ignore_inactive, loss_func=mse),
updates_func=momentum,
learning_rate=1e-02,
learning_rate_changes_by_iteration={
500: 5e-03,
4000: 1e-03,
6000: 5e-06,
7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
)
def callback(net, epoch):
net.source.reshape_target_to_2D = True
net.plotter = MDNPlotter(net)
net.generate_validation_data_and_set_shapes()
net.loss_function = partial(scaled_cost_ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH)
net.learning_rate.set_value(1e-05)
def exp_a(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy['reshape_target_to_2D'] = False
source = RealApplianceSource(**source_dict_copy)
source.reshape_target_to_2D = False
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # pool size: downsample the pooled axis (time, see 'axis' below) by 4
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'W': Normal(std=1/sqrt(N)),
'num_units': source.n_outputs,
'nonlinearity': None
}
]
net_dict_copy['layer_changes'] = {
5001: {
'remove_from': -2,
'callback': callback,
'new_layers': [
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
}
}
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
mit
|
mjgrav2001/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
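Examples
--------
A minimal sketch, assuming the "20news-18828" MLComp dataset has already
been downloaded and unzipped under ``MLCOMP_DATASETS_HOME``:

>>> from sklearn.datasets import load_mlcomp
>>> news_train = load_mlcomp('20news-18828', 'train')  # doctest: +SKIP
>>> news_train.target_names  # doctest: +SKIP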
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
|
bsd-3-clause
|
KarrLab/kinetic_datanator
|
datanator/data_source/rna_halflife/doi_10_1091_mbc_e11_01_0028.py
|
1
|
5405
|
import pandas as pd
from datanator.util import rna_halflife_util, file_util
from datanator_query_python.query import query_uniprot
import datetime
import datanator.config.core
from pymongo.collation import Collation, CollationStrength
from pymongo.errors import WriteError
import tempfile
import shutil
class Halflife(rna_halflife_util.RnaHLUtil):
def __init__(self, cache_dir=None, server=None, src_db=None, protein_col=None,
authDB=None, readPreference=None, username=None, password=None,
verbose=None, max_entries=None, des_db=None, rna_col=None):
"""Init
Args:
cache_dir (:obj:`str`, optional): Cache directory for logs. Defaults to None.
server (:obj:`str`, optional): MongoDB server address. Defaults to None.
src_db (:obj:`str`, optional): Database where the initial uniprot collection resides. Defaults to None.
protein_col (:obj:`str`, optional): Name of the protein (uniprot) collection. Defaults to None.
authDB (:obj:`str`, optional): MongoDB authentication database. Defaults to None.
readPreference (:obj:`str`, optional): MongoDB read preference. Defaults to None.
username (:obj:`str`, optional): MongoDB username. Defaults to None.
password (:obj:`str`, optional): MongoDB password. Defaults to None.
verbose (:obj:`bool`, optional): Whether to display verbose messages. Defaults to None.
max_entries (:obj:`int`, optional): Number of records to be processed. Defaults to None.
des_db (:obj:`str`, optional): Database to which new records will be inserted. Defaults to None.
rna_col (:obj:`str`, optional): Name of the destination rna_halflife collection. Defaults to None.
"""
super().__init__(server=server, username=username, password=password, src_db=src_db,
des_db=des_db, protein_col=protein_col, rna_col=rna_col, authDB=authDB, readPreference=readPreference,
max_entries=max_entries, verbose=verbose, cache_dir=cache_dir)
self.uniprot_query = query_uniprot.QueryUniprot(username=username, password=password, server=server,
authSource=authDB, collection_str='uniprot', readPreference=readPreference)
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
def fill_rna_halflife(self, df, start=0):
"""Fill rna_halflife collection with information parsed
from the publication
Args:
df (:obj:`pandas.DataFrame`): dataframe to be loaded.
start (:obj:`int`, optional): Starting row in df. Defaults to 0.
"""
row_count = len(df.index)
for i, row in df.iloc[start:].iterrows():
if i == self.max_entries:
break
if i % 10 == 0 and self.verbose:
print("Processing locus {} out {}".format(i, row_count))
systematic_name = row['systematic_name']
halflife = row['halflife'] * 60
unit = 's'
species = 'Saccharomyces cerevisiae W303'
r_squared = row['r_squared']
ncbi_taxonomy_id = 580240
reference = [{'doi': '10.1091/mbc.e11-01-0028'}]
obj = {'systematic_name': systematic_name,
'halflife': halflife,
'unit': unit,
'species': species,
'ncbi_taxonomy_id': ncbi_taxonomy_id,
'r_squared': r_squared,
'reference': reference}
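# Sketch of the intended flow (an assumption read from the code below):
# try to append this measurement to an existing document's 'halflives'
# array via an upsert; if MongoDB rejects the upsert with a WriteError,
# fall back to inserting a fresh document keyed by the gene name.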
try:
self.rna_hl_collection.update_one({'halflives.systematic_name': systematic_name},
{'$addToSet': {'halflives': obj}},
collation=self.collation, upsert=True)
except WriteError:
protein_name = self.uniprot_query.get_protein_name_by_gn(systematic_name, species=[559292])
self.rna_hl_collection.insert_one({'gene_name': systematic_name,
'protein_name': protein_name,
'halflives': [obj]})
import os
def main():
src_db = 'datanator'
des_db = 'datanator'
rna_col = 'rna_halflife'
protein_col = 'uniprot'
cache_dir = tempfile.mkdtemp()
username = datanator.config.core.get_config()[
'datanator']['mongodb']['user']
password = datanator.config.core.get_config(
)['datanator']['mongodb']['password']
server = datanator.config.core.get_config(
)['datanator']['mongodb']['server']
src = Halflife(server=server, src_db=src_db,
protein_col=protein_col, authDB='admin', readPreference='nearest',
username=username, password=password, verbose=True, max_entries=float('inf'),
des_db=des_db, rna_col=rna_col, cache_dir=cache_dir)
# url = 'https://www.molbiolcell.org/doi/suppl/10.1091/mbc.e11-01-0028/suppl_file/mc-e11-01-0028-s10.xls'
url = os.path.expanduser('~/karr_lab/datanator/docs/mc-e11-01-0028-s10.xls')
names = ['systematic_name', 'halflife', 'r_squared']
df_s1 = src.make_df(url, 'all mRNAs (with R2>0.8) ranked', names=names, usecols='A:C',
file_type='xls', file_name='mc-e11-01-0028-s10.xls')
src.fill_rna_halflife(df_s1)
shutil.rmtree(cache_dir)
if __name__ == '__main__':
main()
|
mit
|
sonnyhu/scipy
|
scipy/signal/ltisys.py
|
38
|
76123
|
"""
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
#
import warnings
import numpy as np
#np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
#use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
import numpy
from numpy import (r_, eye, real, atleast_1d, atleast_2d, poly,
squeeze, asarray, product, zeros, array,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
from scipy import integrate, interpolate, linalg
from scipy._lib.six import xrange
from .filter_design import tf2zpk, zpk2tf, normalize, freqs
__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti',
'TransferFunction', 'ZerosPolesGain', 'StateSpace', 'lsim',
'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles']
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator polynomials.
The denominator needs to be at least as long as the numerator.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
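
Examples
--------
A minimal sketch, converting an assumed transfer function
``H(s) = (s + 3) / (s**2 + 2*s + 1)`` to controller canonical form:

>>> from scipy.signal import tf2ss
>>> A, B, C, D = tf2ss([1., 3.], [1., 2., 1.])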
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if M > K:
msg = "Improper transfer function. `num` is longer than `den`."
raise ValueError(msg)
if M == 0 or K == 0: # Null system
return (array([], float), array([], float), array([], float),
array([], float))
# pad numerator to have same number of columns as denominator
num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:, 0]
else:
D = array([], float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K - 2, K - 1)]
B = eye(K - 1, 1)
C = num[:, 1:] - num[:, 0] * den[1:]
return A, B, C, D
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are two-dimensional.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
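
Examples
--------
A sketch of the shape inference: given compatible `A`, `B` and `C`, the
missing `D` is filled with zeros of the right shape:

>>> import numpy as np
>>> from scipy.signal import abcd_normalize
>>> A, B, C, D = abcd_normalize(A=np.eye(2), B=np.ones((2, 1)), C=np.ones((1, 2)))
>>> D.shape
(1, 1)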
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num : 2-D ndarray
Numerator(s) of the resulting transfer function(s). `num` has one row
for each of the system's outputs. Each row is a sequence representation
of the numerator polynomial.
den : 1-D ndarray
Denominator of the resulting transfer function(s). `den` is a sequence
representation of the denominator polynomial.
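
Examples
--------
A round-trip sketch through `tf2ss` (coefficients chosen for illustration):

>>> from scipy.signal import tf2ss, ss2tf
>>> A, B, C, D = tf2ss([1., 3.], [1., 2., 1.])
>>> num, den = ss2tf(A, B, C, D)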
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
B = B[:, input:input + 1]
D = D[:, input:input + 1]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape, axis=0) == 0) and (product(C.shape, axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape, axis=0) == 0) and (product(A.shape, axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:, 0] + B[:, 0] + C[0, :] + D
num = numpy.zeros((nout, num_states + 1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k, :])
num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State space representation of the system, in controller canonical
form.
"""
return tf2ss(*zpk2tf(z, p, k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A, B, C, D, input=input))
class lti(object):
"""
Linear Time Invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies.
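
Examples
--------
A sketch of the dispatch described above:

>>> from scipy import signal
>>> sys_tf = signal.lti([1], [1, 1])                     # 2 args -> TransferFunction
>>> sys_zpk = signal.lti([], [-1, -2], 3.)               # 3 args -> ZerosPolesGain
>>> sys_ss = signal.lti([[0.]], [[1.]], [[1.]], [[0.]])  # 4 args -> StateSpace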
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return super(lti, cls).__new__(TransferFunction)
elif N == 3:
return super(lti, cls).__new__(ZerosPolesGain)
elif N == 4:
return super(lti, cls).__new__(StateSpace)
else:
raise ValueError('Needs 2, 3 or 4 arguments.')
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
self.inputs = None
self.outputs = None
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self.to_tf().num
@num.setter
def num(self, num):
obj = self.to_tf()
obj.num = num
source_class = type(self)
self._copy(source_class(obj))
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self.to_tf().den
@den.setter
def den(self, den):
obj = self.to_tf()
obj.den = den
source_class = type(self)
self._copy(source_class(obj))
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self.to_zpk().zeros
@zeros.setter
def zeros(self, zeros):
obj = self.to_zpk()
obj.zeros = zeros
source_class = type(self)
self._copy(source_class(obj))
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self.to_zpk().poles
@poles.setter
def poles(self, poles):
obj = self.to_zpk()
obj.poles = poles
source_class = type(self)
self._copy(source_class(obj))
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self.to_zpk().gain
@gain.setter
def gain(self, gain):
obj = self.to_zpk()
obj.gain = gain
source_class = type(self)
self._copy(source_class(obj))
@property
def A(self):
"""A matrix of the `StateSpace` system."""
return self.to_ss().A
@A.setter
def A(self, A):
obj = self.to_ss()
obj.A = A
source_class = type(self)
self._copy(source_class(obj))
@property
def B(self):
"""B matrix of the `StateSpace` system."""
return self.to_ss().B
@B.setter
def B(self, B):
obj = self.to_ss()
obj.B = B
source_class = type(self)
self._copy(source_class(obj))
@property
def C(self):
"""C matrix of the `StateSpace` system."""
return self.to_ss().C
@C.setter
def C(self, C):
obj = self.to_ss()
obj.C = C
source_class = type(self)
self._copy(source_class(obj))
@property
def D(self):
"""D matrix of the `StateSpace` system."""
return self.to_ss().D
@D.setter
def D(self, D):
obj = self.to_ss()
obj.D = D
source_class = type(self)
self._copy(source_class(obj))
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `scipy.signal.impulse` for details.
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `scipy.signal.step` for details.
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `scipy.signal.lsim` for details.
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `scipy.signal.bode` for details.
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = s1.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `scipy.signal.freqresp` for details.
"""
return freqresp(self, w=w, n=n)
class TransferFunction(lti):
"""Linear Time Invariant system class in transfer function form.
Represents the system as the transfer function
:math:`H(s)=\sum_i b[i] s^i / \sum_j a[j] s^j`, where :math:`b` are
elements of the numerator `num` and :math:`a` are the elements of the
denominator `den`.
Parameters
----------
*system : arguments
The `TransferFunction` class can be instantiated with 1 or 2 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_tf()
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
super(TransferFunction, self).__init__(self, *system)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
)
@property
def num(self):
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
The `StateSpace` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den))
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den))
class ZerosPolesGain(lti):
"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_zpk()
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the zeros, poles, gain LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
super(ZerosPolesGain, self).__init__(self, *system)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system"""
return '{0}(\n{1},\n{2},\n{3}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
)
@property
def zeros(self):
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain))
def to_zpk(self):
"""
Return a copy of the current 'ZerosPolesGain' system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain))
class StateSpace(lti):
"""
Linear Time Invariant system class in state-space form.
Represents the system as the first order differential equation
:math:`\dot{x} = A x + B u` with output :math:`y = C x + D u`.
Parameters
----------
*system : arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies.
"""
def __new__(cls, *system):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], lti):
return system[0].to_ss()
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], lti):
return
super(StateSpace, self).__init__(self, *system)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
)
@property
def A(self):
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2tf`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs))
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs))
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
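Examples
--------
A minimal sketch: the response of an assumed first-order lag
``H(s) = 1 / (s + 1)`` to a unit step input:

>>> import numpy as np
>>> from scipy import signal
>>> t = np.linspace(0, 5, 101)
>>> u = np.ones_like(t)
>>> tout, yout, xout = signal.lsim2(([1.], [1., 1.]), U=u, T=t)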
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Examples
--------
Simulate a double integrator y'' = u, with a constant input u = 1
>>> from scipy import signal
>>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
>>> t = np.linspace(0, 5)
>>> u = np.ones_like(t)
>>> tout, y, x = signal.lsim(system, u, t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = zeros((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None
or (isinstance(U, (int, float)) and U == 0.)
or not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
for i in xrange(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
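
Examples
--------
A minimal sketch, the impulse response of an assumed critically damped
system ``H(s) = 1 / (s**2 + 2*s + 1)``:

>>> from scipy import signal
>>> t, y = signal.impulse(([1.0], [1.0, 2.0, 1.0]))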
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
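
Examples
--------
A minimal sketch, the step response of an assumed first-order lag
``H(s) = 1 / (s + 1)``:

>>> from scipy import signal
>>> t, y = signal.step(([1.0], [1.0, 1.0]))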
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system.to_ss()
else:
sys = lti(*system).to_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
        for every value in this array. If not given, a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([1], [1, 1])
>>> w, mag, phase = signal.bode(s1)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
        calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Examples
--------
# Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> s1 = signal.lti([], [1, 1, 1], [5])
# transfer function: H(s) = 5 / (s-1)^3
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
sys = system.to_tf()
else:
sys = lti(*system).to_tf()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
return w, h
# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
    Check the poles come in complex conjugate pairs
Check shapes of A, B and poles are compatible.
Check the method chosen is compatible with provided poles
Return update method to use and ordered poles
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugate pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
    Check we have complex conjugate pairs and reorder P according to YT, i.e.
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
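# --- Illustrative sketch (added for clarity; not part of the original
# module). A tiny worked example of the ordering above: real poles first in
# increasing order, then each complex pole immediately followed by its
# conjugate. The helper name below is made up.
def _example_order_complex_poles():
    p = np.array([-2 + 1j, -1, -3, -2 - 1j])
    # Returns array([-3.+0.j, -1.+0.j, -2.-1.j, -2.+1.j])
    return _order_complex_poles(p)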
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
    # Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonal to
# Q0, that's what we are looking for !
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
    # KNV does not support complex poles; using the YT technique the two
    # lines below seem to work 9 out of 10 times but it is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
    # Problems arise when imag(xj) => 0; I have no idea how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
    # mu1, mu2: the first two columns of U => the first two rows of U.T
mu1, mu2 = um.T[:2, :, np.newaxis]
    # vm is V.T; with numpy we want the first two rows of V.T
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
# As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
# Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to
# ker_pole_mu_nu and iterate. As we are looking for a vector in
# Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
# (that's a guess, not a claim !)
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
# sort eigenvalues according to their module
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
http://drum.lib.umd.edu/handle/1903/5598
The poles P have to be sorted accordingly to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
    # Stick to the indices in the paper and then subtract one to get numpy
    # array indices; it is a bit easier to link the code to the paper this
    # way even if it is not very clean. The paper is unclear about what
    # should be done when there is only one real pole => using KNV0 on this
    # real pole seems to work
if nb_real > 0:
        # update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
#to debug with numpy qr uncomment the line below
#Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_complex" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
Compute K such that eigenvalues (A - dot(B, K))=poles.
    K is the gain matrix such that the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
    ``A - B*K``, as close as possible to those asked for in poles.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
Desired real poles and/or complex conjugates poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
                The closed loop matrix K such that the eigenvalues of ``A-BK``
                are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
                poles in increasing order, then the complex conjugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2D ndarray
                The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
                `rtol` will be NaN if the optimisation algorithms cannot run,
                i.e. when ``B.shape[1] == 1``, or 0 when the solution is unique.
nb_iter : int
The number of iterations performed before converging.
                `nb_iter` will be NaN if the optimisation algorithms cannot
                run, i.e. when ``B.shape[1] == 1``, or 0 when the solution
                is unique.
Notes
-----
    The Tits and Yang (YT) paper [2]_ is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
    KNV extended to complex poles is used in Matlab's ``place`` function; YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
(Tits and Yang claim on page 14 of their paper that their method can not be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
    altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
http://drum.lib.umd.edu/handle/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
        pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
    # Move all the input checking out of the way; it only adds noise to the code
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = np.nan
# The number of iterations needed before converging
nb_iter = np.nan
    # Step A: QR decomposition of B, page 1132 KNV
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If the solution is unique
if B.shape[0] == rankB:
# if B is square and full rank there is only one solution
        # such that (A+BK)=diag(P), i.e. BK=diag(P)-A
        # if B has as many rows as its rank (but not square) the solution
# is the same as above using least squares
# => use lstsq in both cases
# for complex poles we use the following trick
#
        # |a -b| has for eigenvalues a+bi and a-bi
# |b a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
# e.g solving the first one in R gives the solution
# for the second one in C
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = 0
nb_iter = 0
else:
        # step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
            # We want to select one vector in ker_pole_j to build the transfer
            # matrix, however qr sometimes returns vectors with zeros on the
            # same row for each pole, and this yields very long convergence
            # times. At other times it returns a set of vectors, one with zero
            # imaginary part and one (or several) with imaginary parts. After
            # trying many ways to select the best possible one (e.g. ditch
            # vectors with zero imaginary part for complex poles) I ended up
            # summing all vectors in ker_pole_j; this solves 100% of the
            # problems and is still a valid choice for transfer_matrix. Indeed
            # for complex poles we are sure to have a non-zero imaginary part
            # that way, and the problem of rows full of zeros in
            # transfer_matrix is solved too, as when a vector from ker_pole_j
            # has a zero the other one(s) (when ker_pole_j.shape[1] > 1) for
            # sure won't have a zero there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
# reconstruct transfer_matrix to match complex conjugate pairs,
        # i.e. transfer_matrix_j/transfer_matrix_j+1 are
# Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
                # rel will be an array referencing a column of transfer_matrix;
                # if we don't copy() it, it will change after the next line and
                # the line after will not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles")
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
    # K may still contain complex values with ~0j imaginary parts; get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
|
bsd-3-clause
|
hunterdong/data-science-from-scratch
|
code/natural_language_processing.py
|
48
|
10032
|
from __future__ import division
import math, random, re
from collections import defaultdict, Counter
from bs4 import BeautifulSoup
import requests
def plot_resumes(plt):
data = [ ("big data", 100, 15), ("Hadoop", 95, 25), ("Python", 75, 50),
("R", 50, 40), ("machine learning", 80, 20), ("statistics", 20, 60),
("data science", 60, 70), ("analytics", 90, 3),
("team player", 85, 85), ("dynamic", 2, 90), ("synergies", 70, 0),
("actionable insights", 40, 30), ("think out of the box", 45, 10),
("self-starter", 30, 50), ("customer focus", 65, 15),
("thought leadership", 35, 35)]
def text_size(total):
"""equals 8 if total is 0, 28 if total is 200"""
return 8 + total / 200 * 20
for word, job_popularity, resume_popularity in data:
plt.text(job_popularity, resume_popularity, word,
ha='center', va='center',
size=text_size(job_popularity + resume_popularity))
plt.xlabel("Popularity on Job Postings")
plt.ylabel("Popularity on Resumes")
plt.axis([0, 100, 0, 100])
plt.show()
#
# n-gram models
#
def fix_unicode(text):
return text.replace(u"\u2019", "'")
def get_document():
url = "http://radar.oreilly.com/2010/06/what-is-data-science.html"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
content = soup.find("div", "entry-content") # find entry-content div
regex = r"[\w']+|[\.]" # matches a word or a period
document = []
for paragraph in content("p"):
words = re.findall(regex, fix_unicode(paragraph.text))
document.extend(words)
return document
def generate_using_bigrams(transitions):
current = "." # this means the next word will start a sentence
result = []
while True:
next_word_candidates = transitions[current] # bigrams (current, _)
current = random.choice(next_word_candidates) # choose one at random
result.append(current) # append it to results
if current == ".": return " ".join(result) # if "." we're done
def generate_using_trigrams(starts, trigram_transitions):
current = random.choice(starts) # choose a random starting word
prev = "." # and precede it with a '.'
result = [current]
while True:
next_word_candidates = trigram_transitions[(prev, current)]
next = random.choice(next_word_candidates)
prev, current = current, next
result.append(current)
if current == ".":
return " ".join(result)
def is_terminal(token):
return token[0] != "_"
def expand(grammar, tokens):
for i, token in enumerate(tokens):
# ignore terminals
if is_terminal(token): continue
# choose a replacement at random
replacement = random.choice(grammar[token])
if is_terminal(replacement):
tokens[i] = replacement
else:
tokens = tokens[:i] + replacement.split() + tokens[(i+1):]
return expand(grammar, tokens)
# if we get here we had all terminals and are done
return tokens
def generate_sentence(grammar):
return expand(grammar, ["_S"])
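# --- Illustrative sketch (added for clarity; not part of the original file).
# A minimal trace of how expand() rewrites non-terminals (tokens starting
# with "_") until only terminals remain; the toy grammar below is made up.
def example_expand():
    toy_grammar = {
        "_S": ["_NP _VP"],
        "_NP": ["Python"],
        "_VP": ["learns"],
    }
    # ["_S"] -> ["_NP", "_VP"] -> ["Python", "_VP"] -> ["Python", "learns"]
    return expand(toy_grammar, ["_S"])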
#
# Gibbs Sampling
#
def roll_a_die():
return random.choice([1,2,3,4,5,6])
def direct_sample():
d1 = roll_a_die()
d2 = roll_a_die()
return d1, d1 + d2
def random_y_given_x(x):
"""equally likely to be x + 1, x + 2, ... , x + 6"""
return x + roll_a_die()
def random_x_given_y(y):
if y <= 7:
# if the total is 7 or less, the first die is equally likely to be
# 1, 2, ..., (total - 1)
return random.randrange(1, y)
else:
# if the total is 7 or more, the first die is equally likely to be
# (total - 6), (total - 5), ..., 6
return random.randrange(y - 6, 7)
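# Worked example of the conditional above (added note, not in the original
# file): if the total is y = 4, the first die is uniform on {1, 2, 3};
# if the total is y = 10, it is uniform on {4, 5, 6}, because the second die
# contributes at least 1 and at most 6.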
def gibbs_sample(num_iters=100):
x, y = 1, 2 # doesn't really matter
for _ in range(num_iters):
x = random_x_given_y(y)
y = random_y_given_x(x)
return x, y
def compare_distributions(num_samples=1000):
counts = defaultdict(lambda: [0, 0])
for _ in range(num_samples):
counts[gibbs_sample()][0] += 1
counts[direct_sample()][1] += 1
return counts
#
# TOPIC MODELING
#
def sample_from(weights):
total = sum(weights)
rnd = total * random.random() # uniform between 0 and total
for i, w in enumerate(weights):
rnd -= w # return the smallest i such that
if rnd <= 0: return i # sum(weights[:(i+1)]) >= rnd
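# --- Illustrative sketch (added for clarity; not part of the original file).
# With weights [1, 2, 7], sample_from() returns 0, 1 and 2 with probability
# 0.1, 0.2 and 0.7 respectively; the helper name below is made up.
def example_sample_from(num_draws=10000):
    draws = Counter(sample_from([1, 2, 7]) for _ in range(num_draws))
    # returns roughly {0: 0.1, 1: 0.2, 2: 0.7}
    return { i : draws[i] / num_draws for i in range(3) }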
documents = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
K = 4
document_topic_counts = [Counter()
for _ in documents]
topic_word_counts = [Counter() for _ in range(K)]
topic_counts = [0 for _ in range(K)]
document_lengths = map(len, documents)
distinct_words = set(word for document in documents for word in document)
W = len(distinct_words)
D = len(documents)
def p_topic_given_document(topic, d, alpha=0.1):
"""the fraction of words in document _d_
that are assigned to _topic_ (plus some smoothing)"""
return ((document_topic_counts[d][topic] + alpha) /
(document_lengths[d] + K * alpha))
def p_word_given_topic(word, topic, beta=0.1):
"""the fraction of words assigned to _topic_
that equal _word_ (plus some smoothing)"""
return ((topic_word_counts[topic][word] + beta) /
(topic_counts[topic] + W * beta))
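# Worked example of the smoothing above (added note, not in the original
# file): with K = 4 topics and alpha = 0.1, a 10-word document with 4 words
# currently assigned to topic 0 gives
# p_topic_given_document(0, d) = (4 + 0.1) / (10 + 4 * 0.1) ~= 0.39,
# so no topic ever gets exactly zero weight.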
def topic_weight(d, word, k):
"""given a document and a word in that document,
return the weight for the k-th topic"""
return p_word_given_topic(word, k) * p_topic_given_document(k, d)
def choose_new_topic(d, word):
return sample_from([topic_weight(d, word, k)
for k in range(K)])
random.seed(0)
document_topics = [[random.randrange(K) for word in document]
for document in documents]
for d in range(D):
for word, topic in zip(documents[d], document_topics[d]):
document_topic_counts[d][topic] += 1
topic_word_counts[topic][word] += 1
topic_counts[topic] += 1
for iter in range(1000):
for d in range(D):
for i, (word, topic) in enumerate(zip(documents[d],
document_topics[d])):
# remove this word / topic from the counts
# so that it doesn't influence the weights
document_topic_counts[d][topic] -= 1
topic_word_counts[topic][word] -= 1
topic_counts[topic] -= 1
document_lengths[d] -= 1
# choose a new topic based on the weights
new_topic = choose_new_topic(d, word)
document_topics[d][i] = new_topic
# and now add it back to the counts
document_topic_counts[d][new_topic] += 1
topic_word_counts[new_topic][word] += 1
topic_counts[new_topic] += 1
document_lengths[d] += 1
if __name__ == "__main__":
document = get_document()
bigrams = zip(document, document[1:])
transitions = defaultdict(list)
for prev, current in bigrams:
transitions[prev].append(current)
random.seed(0)
print "bigram sentences"
for i in range(10):
print i, generate_using_bigrams(transitions)
print
# trigrams
trigrams = zip(document, document[1:], document[2:])
trigram_transitions = defaultdict(list)
starts = []
for prev, current, next in trigrams:
if prev == ".": # if the previous "word" was a period
starts.append(current) # then this is a start word
trigram_transitions[(prev, current)].append(next)
print "trigram sentences"
for i in range(10):
print i, generate_using_trigrams(starts, trigram_transitions)
print
grammar = {
"_S" : ["_NP _VP"],
"_NP" : ["_N",
"_A _NP _P _A _N"],
"_VP" : ["_V",
"_V _NP"],
"_N" : ["data science", "Python", "regression"],
"_A" : ["big", "linear", "logistic"],
"_P" : ["about", "near"],
"_V" : ["learns", "trains", "tests", "is"]
}
print "grammar sentences"
for i in range(10):
print i, " ".join(generate_sentence(grammar))
print
print "gibbs sampling"
comparison = compare_distributions()
for roll, (gibbs, direct) in comparison.iteritems():
print roll, gibbs, direct
# topic MODELING
for k, word_counts in enumerate(topic_word_counts):
for word, count in word_counts.most_common():
if count > 0: print k, word, count
topic_names = ["Big Data and programming languages",
"Python and statistics",
"databases",
"machine learning"]
for document, topic_counts in zip(documents, document_topic_counts):
print document
for topic, count in topic_counts.most_common():
if count > 0:
print topic_names[topic], count,
print
|
unlicense
|
manashmndl/scikit-learn
|
sklearn/utils/tests/test_validation.py
|
133
|
18339
|
"""Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
    # But this works if the input data is forced to look like a 2D array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
    # Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
    # Check whether ValueError is raised when a non-estimator instance is passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
|
bsd-3-clause
|