repo_name | path | copies | size | content | license
---|---|---|---|---|---|
marktoakley/LamarckiAnt | SCRIPTS/python/ptmc/get_exchanges_Tdist.py | 1 | 14886 | import numpy as np
import scipy.interpolate
import copy
def getOverlap(x1, var1, x2, var2):
"""
calculate the overlap of two gaussians
"""
return np.exp( -(x1-x2)**2 / (var1+var2) ) / np.sqrt(np.pi) / np.sqrt(var1+var2)
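# Worked check (added for illustration, not part of the original script): for
# two identical gaussians, x1 == x2 and var1 == var2 == var, the expression
# above reduces to 1 / sqrt(2 * pi * var), e.g.
# getOverlap(0.0, 0.5, 0.0, 0.5) == 1.0 / np.sqrt(np.pi)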
class Overlap(object):
def __init__(self, means, vars, Tmeans, overlapmin = 0.001):
#set up interpolation of means and standard deviations for overlap calculation
self.meaninterp = scipy.interpolate.UnivariateSpline( Tmeans, means, s=0)
self.varinterp = scipy.interpolate.UnivariateSpline( Tmeans, vars, s=0)
def getOverlaps(self, temps):
x = self.meaninterp(temps)
var = self.varinterp(temps)
overlaps = np.zeros(len(temps)-1)
for i in range(len(temps)-1):
overlaps[i] = getOverlap( x[i], var[i], x[i+1], var[i+1] )
return overlaps
def __call__(self, temps):
return self.getOverlaps(temps)
class Eta(object):
def __init__(self, Teta, eta, Tmin, Tmax):
#if eta doesn't cover the whole range [Tmin,Tmax], extend it linearly
if Teta[0] > Tmin:
etaTmin = eta[0] + (eta[1] - eta[0])/(Teta[1] - Teta[0]) * (Tmin - Teta[0])
newT = np.zeros( 1+len(Teta))
newT[0] = Tmin
newT[1:] = Teta[:]
neweta = np.zeros( 1+len(Teta))
neweta[0] = etaTmin
neweta[1:] = eta[:]
Teta = newT
eta = neweta
if Teta[-1] < Tmax:
etaTmax = eta[-1] + (eta[-1] - eta[-2])/(Teta[-1] - Teta[-2]) * (Tmax - Teta[-1])
newT = np.zeros( 1+len(Teta))
newT[-1] = Tmax
newT[:-1] = Teta[:]
neweta = np.zeros( 1+len(Teta))
neweta[-1] = etaTmax
neweta[:-1] = eta[:]
Teta = newT
eta = neweta
self.eta = eta
self.Teta = Teta
print "after interpolation Teta", Teta
print "after interpolation Teta", eta
#import matplotlib.pyplot as plt
#plt.plot( self.Teta, self.eta, '-x')
#plt.show()
#exit(1)
self.etainterp = scipy.interpolate.UnivariateSpline(self.Teta, self.eta, s=0)
self.etanorm = self.etainterp.integral(Tmin, Tmax)
def getEta(self, T):
"""
interpolate to find the density at a given temperature
"""
#return np.interp(T, self.Teta, self.eta)
return self.etainterp(T) / self.etanorm
def __call__(self, T):
return self.getEta(T)
def integrate(self, T1, T2):
return self.etainterp.integral(T1, T2) / self.etanorm
class TCalc(object):
"""
return temperatures distributed as evenly as possible on the interval
Tmin, Tmax according to the distribution eta, subject to the constraint
overlap(T[i], T[i+1]) > overlapmin
"""
def __init__(self, Tmin, Tmax, nreps, eta, getOverlaps, overlapmin):
self.Tmin = Tmin
self.Tmax = Tmax
self.nreps = nreps
self.temps = np.ones(self.nreps)
self.temps[0] = Tmin
self.temps[-1] = Tmax
self.dtemps = np.zeros(self.nreps-1)
self.overlapmin = overlapmin
self.count = 0
self.eta = eta
self.getOverlaps = getOverlaps
self.fixed = [False for i in range(self.nreps-1) ] #contains the temperature intervals that are subject to the overlap constraint
self.fixed = np.array(self.fixed)
def getVolume(self):
"""
return volume per interval
"""
self.setTemps()
if not any(self.fixed):
return 1./(self.nreps-1)
V = 0.
for i in range(self.nreps-1):
if not self.fixed[i]: continue
vol = self.eta.integrate(self.temps[i], self.temps[i+1])
#print "integral", self.temps[i], self.temps[i+1], vol
V += vol
nfixed = sum(self.fixed)
print "nfixed", nfixed, V
return (1.-V)/ (self.nreps-1 - nfixed)
def placeTemps(self):
"""
starting from Tmin, place temps such that the integral over eta of the temperature
intervals is equal.
"""
fixed = self.fixed
temps = self.temps
vol = self.getVolume()
dT = 1e-3
T = self.Tmin
irep = 1
while irep < self.nreps-1:
T += dT
if T >= self.Tmax:
#raise Exception("T>Tmax. irep=%d" %(irep) )
print "warning: T>Tmax. irep=%d" %(irep)
fixed[irep:] = False
if False:
#randomly unfix an interval
nfixed = sum(fixed)
irand = np.random.random_integers(1, nfixed)
count = 0
for i in range(len(fixed)):
if fixed[i]:
count+=1
if count == irand:
print "randomly unfixing interval", i
fixed[i] = False
break
if False:
#unfix the interval with the largest integrated volume
vmax = 0
imax = 0
for i in range(len(fixed)):
if fixed[i]:
vol = self.eta.integrate(self.temps[i], self.temps[i+1])
if vol > vmax:
vmax = vol
imax = i
print "unfixing interval", imax, "with volume", vmax
fixed[imax] = False
return False
v = self.eta.integrate(self.temps[irep-1], T)
if v >= vol:
#place temperature
self.temps[irep] = T
if fixed[irep-1]:
#an interval has become unfixed. mark it as unfixed and abort
print "unfixing interval", irep-1
fixed[irep-1] = False
fixed[irep:] = False #unfix all the ones we didn't get to
return False
irep += 1
continue
q = self.getOverlaps( np.array([self.temps[irep-1], T]) )
if q < self.overlapmin:
self.temps[irep] = T
if not fixed[irep-1]:
#we have a newly fixed interval. mark it as fixed and abort
print "fixing interval", irep-1
fixed[irep-1] = True
#fixed[irep:] = False #unfix all the ones we didn't get to
return False
irep += 1
continue
"""
we've made it to the end, check if the overlap with Tmax is ok
"""
q = self.getOverlaps( np.array([self.temps[-2], self.temps[-1]]) )
if q < self.overlapmin:
fixed[-1] = True
return False
return True
def findTemps1(self):
while True:
ret = self.placeTemps()
print "T = ", self.temps
if ret:
break
def findTemps(self):
while True:
ret = self.placeTempsInternal()
self.setTemps()
print "T = ", self.temps
if ret:
break
def dTtoTemp(self, i):
if i == self.nreps-1: return self.Tmax
return self.Tmin + np.sum(self.dtemps[:i])
def setTemps(self):
for i in range(self.nreps):
self.temps[i] = self.dTtoTemp(i)
def placeTempsInternal(self):
"""
starting from Tmin, place temps such that the integral over eta of the temperature
intervals is equal.
"""
#self.temps = np.array([ self.dTtoTemp(i) for i in range(self.ntemps) ])
self.setTemps()
fixed = self.fixed
temps = self.temps
vol = self.getVolume()
dT = 1e-3
T = self.Tmin
irep = 1
while irep < self.nreps-1:
T += dT
if T >= self.Tmax:
#raise Exception("T>Tmax. irep=%d" %(irep) )
print "warning: T>Tmax. irep=%d" %(irep)
fixed[irep:] = False
if False:
#randomly unfix an interval
nfixed = sum(fixed)
irand = np.random.random_integers(1, nfixed)
count = 0
for i in range(len(fixed)):
if fixed[i]:
count+=1
if count == irand:
print "randomly unfixing interval", i
fixed[i] = False
break
if False:
#unfix the interval with the largest integrated volume
vmax = 0
imax = 0
for i in range(len(fixed)):
if fixed[i]:
vol = self.eta.integrate(self.dTtoTemp(i), self.dTtoTemp(i+1))
if vol > vmax:
vmax = vol
imax = i
print "unfixing interval", imax, "with volume", vmax
fixed[imax] = False
return False
v = self.eta.integrate(self.dTtoTemp(irep- 1), T)
if v >= vol:
#place temperature
self.dtemps[irep-1] = T - self.dTtoTemp(irep-1)
if fixed[irep-1]:
#an interval has become unfixed. mark it as unfixed and abort
print "unfixing interval", irep-1
fixed[irep-1] = False
fixed[irep-1:] = False #unfix all the ones we didn't get to
return False
irep += 1
continue
q = self.getOverlaps( np.array([self.dTtoTemp(irep-1), T]) )
if q < self.overlapmin:
self.dtemps[irep-1] = T - self.dTtoTemp(irep-1)
if not fixed[irep-1]:
#we have a newly fixed interval. mark it as fixed and abort
print "fixing interval", irep-1, self.dtemps[irep-1], T, self.dTtoTemp(irep-1)
fixed[irep-1] = True
#fixed[irep:] = False #unfix all the ones we didn't get to
return False
irep += 1
continue
"""
we've made it to the end, check if the overlap with Tmax is ok
"""
#q = self.getOverlaps( np.array([self.temps[-2], self.temps[-1]]) )
q = self.getOverlaps( np.array( [self.dTtoTemp(self.nreps-1), self.dTtoTemp(self.nreps)] ) )
if q < self.overlapmin:
fixed[-1] = True
return False
#if the volume of the last interval is not good then run it one more time
v = self.eta.integrate( self.dTtoTemp(self.nreps-1), self.dTtoTemp(self.nreps) )
if np.abs((v - vol)/vol) > 1e-3:
return False
return True
from pele.potentials.potential import potential as basepot
class TCalcNew(basepot):
"""
return temperatures distributed as evenly as possible on the interval
Tmin, Tmax according to the distribution eta, subject to the constraint
overlap(T[i], T[i+1]) > overlapmin
"""
def __init__(self, Tmin, Tmax, nreps, eta, getOverlaps, overlapmin):
self.Tmin = Tmin
self.Tmax = Tmax
self.nreps = nreps
self.temps = np.ones(self.nreps)
self.temps[0] = Tmin
self.temps[-1] = Tmax
self.dtemps = np.zeros(self.nreps-1)
self.overlapmin = overlapmin
self.count = 0
self.eta = eta
self.getOverlaps = getOverlaps
self.fixed = [False for i in range(self.nreps-1) ] #contains the temperature intervals that are subject to the overlap constraint
self.fixed = np.array(self.fixed)
def placeTemps(self, vol):
"""
starting from Tmin, place temps such that the integral over eta of the temperature
intervals is equal.
"""
fixed = self.fixed
temps = self.temps
#vol = self.getVolume()
dT = 1e-4
T = self.Tmin
irep = 1
while irep < self.nreps-1:
T += dT
if T >= self.Tmax:
break
v = self.eta.integrate(self.temps[irep-1], T)
if v >= vol:
#place temperature
self.temps[irep] = T
irep += 1
continue
q = self.getOverlaps( np.array([self.temps[irep-1], T]) )
if q < self.overlapmin:
self.temps[irep] = T
irep += 1
continue
return irep
def increaseVolume(self, volume):
if self.status == self.decreased:
if self.reversed == self.max_reversed:
self.dV /= 10
self.reversed += 1
self.status = self.increased
return volume + self.dV
def decreaseVolume(self, volume):
if self.status == self.increased:
if self.reversed == self.max_reversed:
self.dV /= 10
self.reversed += 1
self.status = self.decreased
return volume - self.dV
def findTemps(self):
self.increased = 1
self.decreased = -1
self.status = 0
self.reversed = 0
self.max_reversed = 10
volume = 1./(self.nreps-1)
dV = volume / 100
self.dV = dV
for count in range(200):
nplaced = self.placeTemps(volume)
if nplaced < self.nreps-1:
#not enough placed. volume is too big
volume = self.decreaseVolume(volume)
elif nplaced == self.nreps-1:
q = self.getOverlaps( np.array([self.temps[-2], self.Tmax]) )
v = self.eta.integrate(self.temps[-2], self.Tmax)
#the last rep was placed. check the volume of the last interval
if q < self.overlapmin:
volume = self.increaseVolume(volume)
elif v > volume:
#increase volume a bit
volume = self.increaseVolume(volume)
else:
volume = self.decreaseVolume(volume)
print count, "volume", volume, nplaced, q, v
print "T = ", self.temps
if self.reversed > self.max_reversed*2:
break
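# Illustrative usage sketch (not part of the original script; the helper name
# and all input arrays below are made up). It shows how the pieces above are
# intended to fit together: interpolate per-temperature energy means and
# variances, define a target density eta(T), then ask TCalcNew for a
# temperature ladder on [Tmin, Tmax].
def _example_temperature_ladder():
    Tmeans = np.linspace(0.1, 2.0, 8)        # temperatures with known statistics
    means = 10.0 * Tmeans                    # assumed mean energies
    variances = 1.0 + 0.5 * Tmeans           # assumed energy variances
    overlap = Overlap(means, variances, Tmeans)
    eta = Eta(Tmeans, np.ones_like(Tmeans), Tmin=0.1, Tmax=2.0)  # flat density
    tcalc = TCalcNew(Tmin=0.1, Tmax=2.0, nreps=16, eta=eta,
                     getOverlaps=overlap, overlapmin=0.001)
    tcalc.findTemps()
    return tcalc.temps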
| gpl-3.0 |
gotomypc/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non-informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
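# Illustrative addition (not part of the original example; uncomment to use):
# the boolean support mask shows which columns survive the 10% selection. With
# the 4 informative iris features followed by 20 noise columns, the selected
# indices should fall within the first four.
# print("Selected feature indices:", X_indices[selector.get_support()])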
| bsd-3-clause |
sanja7s/EEDC | src/correlation/node_plug_variation_num_jobs.py | 1 | 3365 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
author: sanja7s
---------------
compute per-node plug power and job-count statistics and their correlations
"""
import os
import datetime as dt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from collections import defaultdict
from matplotlib import colors
from pylab import MaxNLocator
import pylab as pl
from mpl_toolkits.axes_grid import inset_locator
matplotlib.style.use('ggplot')
IN_DIR = "../../data/nodes"
os.chdir(IN_DIR)
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
grid = {'color' : 'gray',
'alpha' : 0.5,
'linestyle' : '-.'}
lines = {'color' : 'gray'}
#xticks = {'color' : 'gray'}
matplotlib.rc('font', **font)
matplotlib.rc('grid', **grid)
matplotlib.rc('lines', **lines)
#matplotlib.rc('ticks', **ticks)
def read_in_node_types():
f_in = 'node_types.csv'
node_types = defaultdict(int)
with open(f_in, 'r') as f:
for line in f:
# n are irrelevant
n, node, n, t, n = line.strip().split('"')
node_types[node] = t
return node_types
def read_in_data(the_node_type):
f_in = 'node_plug_jobslist.csv'
distr_plug = defaultdict(list)
distr_n_jobs = defaultdict(list)
node_types = read_in_node_types()
i = 0
with open(f_in, 'r') as f:
for line in f:
# n are irrelevant
n, node, n, plug, n, jobs_list, n = line.strip().split('"')
if node_types[node] != the_node_type:
continue
plug = float(plug)
distr_plug[node].append(plug)
if jobs_list == "":
distr_n_jobs[node].append(0)
else:
jobs = jobs_list.split(',')
distr_n_jobs[node].append(len(jobs))
i += 1
"""
if i == 100000:
print distr_n_jobs
return distr_plug, distr_n_jobs
"""
return distr_plug, distr_n_jobs
#read_in_data()
def save_data_per_node(the_node_type):
def stats(d1):
dd1 = np.array(d1)
m1 = np.min(dd1)
mm1 = np.max(dd1)
avg1 = np.mean(dd1)
med1 = np.median(dd1)
stdev1 = np.std(dd1)
var1 = np.var(dd1)
return m1, mm1, avg1, med1, stdev1, var1
d1, d2 = read_in_data(the_node_type)
f_out = 'node_plug_n_jobs_variation' + the_node_type + '.csv'
with open(f_out, 'w') as f:
for node in d1:
m1, mm1, avg1, med1, stdev1, var1 = stats(d1[node])
m2, mm2, avg2, med2, stdev2, var2 = stats(d2[node])
f.write(str(node) + '\t' \
+ str(m1) + '\t' + str(mm1) + '\t' + str(avg1) + '\t' + str(med1) \
+ '\t' + str(stdev1) + '\t' + str(var1) + '\t' \
+ str(m2) + '\t' + str(mm2) + '\t' + str(avg2) + '\t' + str(med2) \
+ '\t' + str(stdev2) + '\t' + str(var2) + '\n' )
#save_data_per_node(the_node_type='SandyBridge')
def find_correlations(the_node_type):
f_in = 'node_plug_n_jobs_variation' + the_node_type + '.csv'
M = []
with open(f_in, 'r') as f:
for line in f:
node, min1, max1, avg1, med1, std1, var1, min2, max2, avg2, med2, std2, var2 = line.split('\t')
M.append([float(min1), float(max1), float(avg1), float(med1), float(std1), float(var1)\
, float(min2), float(max2), float(avg2), float(med2), float(std2), float(var2)])
M = np.array(M)
print M.shape
#print M
M = np.transpose(M)
print M.shape
cor_M = np.corrcoef(M)
f_out = 'cor_M_full' + the_node_type +'.tab'
with open(f_out, 'w') as fo:
for row in cor_M:
for el in row:
fo.write('%.3f \t' % (el))
fo.write('\n')
find_correlations(the_node_type='SandyBridge')
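# Note on reading cor_M (added for clarity, not part of the original script):
# rows/columns follow the order [min, max, avg, median, std, var] for the plug
# readings, then the same six statistics for the number of jobs, so e.g.
# cor_M[2, 8] is the correlation between a node's average plug power and its
# average job count.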
| apache-2.0 |
mlyundin/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
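# Illustrative usage sketch (not part of the original test module; the helper
# name is made up). The DistanceMetric API exercised by the tests above can be
# used directly like this:
def _example_minkowski_pairwise():
    X = np.random.random((5, 3))
    dm = DistanceMetric.get_metric("minkowski", p=3)
    return dm.pairwise(X)  # (5, 5) symmetric matrix of pairwise distances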
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/user_interfaces/embedding_in_wx5.py | 12 | 1586 | # Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
import wx
import wx.aui
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as Toolbar
class Plot(wx.Panel):
def __init__(self, parent, id = -1, dpi = None, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
self.canvas = Canvas(self, -1, self.figure)
self.toolbar = Toolbar(self.canvas)
self.toolbar.Realize()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas,1,wx.EXPAND)
sizer.Add(self.toolbar, 0 , wx.LEFT | wx.EXPAND)
self.SetSizer(sizer)
class PlotNotebook(wx.Panel):
def __init__(self, parent, id = -1):
wx.Panel.__init__(self, parent, id=id)
self.nb = wx.aui.AuiNotebook(self)
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.SetSizer(sizer)
def add(self,name="plot"):
page = Plot(self.nb)
self.nb.AddPage(page,name)
return page.figure
def demo():
app = wx.PySimpleApp()
frame = wx.Frame(None,-1,'Plotter')
plotter = PlotNotebook(frame)
axes1 = plotter.add('figure 1').gca()
axes1.plot([1,2,3],[2,1,4])
axes2 = plotter.add('figure 2').gca()
axes2.plot([1,2,3,4,5],[2,1,4,2,3])
#axes1.figure.canvas.draw()
#axes2.figure.canvas.draw()
frame.Show()
app.MainLoop()
if __name__ == "__main__": demo()
| mit |
vybstat/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
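# Note (added for clarity, not part of the original example): with theta0 fixed
# to the MLE value and the bounds set to None, each cross-validation fit below
# reuses the correlation parameters found on the whole dataset instead of
# re-estimating them fold by fold.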
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
openclimatedata/pymagicc | pymagicc/core.py | 1 | 56674 | import shutil
import subprocess # nosec # have to use subprocess
import warnings
from copy import deepcopy
from os import listdir, makedirs
from os.path import abspath, basename, dirname, exists, isfile, join
from subprocess import PIPE # nosec # have to use subprocess
from tempfile import mkdtemp
import f90nml
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from openscm_units import unit_registry
from scmdata import run_append
from .config import _wine_installed, config
from .errors import InvalidTemporalResError, NoReaderWriterError
from .io import MAGICCData, read_cfg_file
from .io.utils import _get_openscm_var_from_filepath
from .scenarios import zero_emissions
from .utils import get_date_time_string
IS_WINDOWS = config["is_windows"]
class WineNotInstalledError(Exception):
"""Exception raised if wine is not installed but is required"""
def _copy_files(source, target, recursive=False):
"""
Copy all the files in source directory to target.
If ``recursive``, include subdirectories, otherwise ignores subdirectories.
"""
if recursive:
shutil.copytree(source, target)
return
source_files = listdir(source)
if not exists(target):
makedirs(target)
for filename in source_files:
full_filename = join(source, filename)
if isfile(full_filename):
shutil.copy(full_filename, target)
def _clean_value(v):
if isinstance(v, str):
return v.strip()
elif isinstance(v, list):
if isinstance(v[0], str):
return [i.replace("\0", "").strip().replace("\n", "") for i in v]
return v
class MAGICCBase(object):
"""
Provides access to the MAGICC binary and configuration.
To enable multiple MAGICC 'setups' to be configured independently,
the MAGICC directory containing the input files, configuration
and binary is copied to a new folder. The configuration in this
MAGICC copy can then be edited without impacting other instances or your
original MAGICC distribution.
A ``MAGICC`` instance first has to be setup by calling
``create_copy``. If many model runs are being performed this step only has
to be performed once. The ``run`` method can then be called many times
without re-copying the files each time. Between each call to ``run``, the
configuration files can be updated to perform runs with different
configurations.
Parameters
----------
root_dir : str
If ``root_dir`` is supplied, an existing MAGICC 'setup' is
used.
"""
version = None
_scen_file_name = "SCENARIO.SCEN7"
def __init__(self, root_dir=None, strict=True):
"""
Initialise
Parameters
----------
root_dir : str
Root directory of the MAGICC package. If ``None``, a temporary
copy of MAGICC is made based on the result of
``self.get_executable()``.
strict: bool
If True, enforce the configuration checks, otherwise a warning
is raised if any invalid configuration is found and the run is
continued. Setting ``strict=False`` is only recommended for
experienced users of MAGICC.
"""
self.root_dir = root_dir
self.config = None
self.executable = self.get_executable()
self.strict = strict
if root_dir is not None:
self.is_temp = False
else:
# Create a temp directory
self.is_temp = True
def __enter__(self):
if self.is_temp and self.run_dir is None:
self.create_copy()
return self
def __exit__(self, *args, **kwargs):
self.remove_temp_copy()
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
The root folder and ``bin`` folders are copied (not recursively). The
``run`` folder is copied recursively.
"""
if self.executable is None or not isfile(self.executable):
raise FileNotFoundError(
"Could not find MAGICC{} executable: {}".format(
self.version, self.executable
)
)
if self.is_temp:
if self.root_dir is not None:
raise AssertionError(
"A temp copy for this instance has already been created"
)
self.root_dir = mkdtemp(prefix="pymagicc-")
if exists(self.run_dir):
raise Exception("A copy of MAGICC has already been created.")
if not exists(self.root_dir):
makedirs(self.root_dir)
exec_dir = basename(self.original_dir)
# Copy a subset of folders from the MAGICC `original_dir`
# Also copy anything which is in the root of the MAGICC distribution
# Assumes that the MAGICC binary is in a folder one level below the root
# of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc
dirs_to_copy = [".", "bin"]
dirs_to_copy_recursive = ["run"]
# Check that the executable is in a valid sub directory
if exec_dir not in dirs_to_copy + dirs_to_copy_recursive:
raise AssertionError("binary must be in bin/ or run/ directory")
for d in dirs_to_copy + dirs_to_copy_recursive:
source_dir = abspath(join(self.original_dir, "..", d))
if exists(source_dir):
_copy_files(
source_dir,
join(self.root_dir, d),
recursive=d in dirs_to_copy_recursive,
)
# Create an empty out dir
# MAGICC assumes that the 'out' directory already exists
makedirs(join(self.root_dir, "out"))
# Create basic configuration files so magicc can run
self.set_years()
self.set_config()
@property
def binary_name(self):
"""
Name of the MAGICC binary file
Returns
-------
str
Name of the binary file
"""
return basename(self.executable)
@property
def original_dir(self):
"""
Directory of the MAGICC package.
This is the directory which contains the ``run`` and ``out`` folders.
Returns
-------
str
Path of the MAGICC package
"""
return dirname(self.executable)
@property
def run_dir(self):
"""
Run directory of the MAGICC package.
This path always ends in ``run``.
Returns
-------
str
Path of the run directory
"""
if self.root_dir is None:
return None
return join(self.root_dir, "run")
@property
def out_dir(self):
"""
Output directory of the MAGICC package.
This path always ends in ``out``.
Returns
-------
str
Path of the output directory
"""
if self.root_dir is None:
return None
return join(self.root_dir, "out")
@property
def default_config(self):
"""
Default configuration for a run
Returns
-------
:obj:`f90nml.Namelist`
Namelist object containing the default configuration
"""
base = f90nml.read(join(self.run_dir, "MAGCFG_DEFAULTALL.CFG"))
user = f90nml.read(join(self.run_dir, "MAGCFG_USER.CFG"))
self._default_config = deepcopy(base)
def _deep_update(b, o):
for k, v in o.items():
if isinstance(v, dict):
_deep_update(b[k], v)
else:
b.update(o)
_deep_update(self._default_config, user)
return self._default_config
def run(self, scenario=None, only=None, debug=False, **kwargs):
"""
Run MAGICC and parse the output.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
Any logged output from running MAGICC will be in ``output.metadata["stderr"]``.
For MAGICC7 and above, the level of logging can be controlled with the
``debug`` argument.
Any subannual files output by MAGICC will be ignored by this function. These
files can be read in manually using :class:`pymagicc.io.MAGICCData` directly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run. If None MAGICC will simply run with whatever config has
already been set.
only : list of str
If not None, only extract variables in this list.
debug: {True, False, "verbose"}
If true, MAGICC will run in debug mode with the maximum amount of logging.
If "verbose", MAGICC will be run in verbose mode.
kwargs
Other config values to pass to MAGICC for the run
Returns
-------
:obj:`pymagicc.io.MAGICCData`
MAGICCData object containing that data in its ``df`` attribute and
metadata and parameters (depending on the value of ``include_parameters``)
in its ``metadata`` attribute.
Raises
------
ValueError
If no output is found which matches the list specified in ``only``.
subprocess.CalledProcessError
If MAGICC fails to run. Check the 'stderr' key of the result's `metadata`
attribute to inspect the results output from MAGICC.
ValueError
The user attempts to use ``debug`` with MAGICC6
"""
if not exists(self.root_dir):
raise FileNotFoundError(self.root_dir)
if self.executable is None:
raise ValueError(
"MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`".format(
self.version
)
)
if scenario is not None:
kwargs = self.set_emission_scenario_setup(scenario, kwargs)
yr_config = {}
if "startyear" in kwargs:
yr_config["startyear"] = kwargs.pop("startyear")
if "endyear" in kwargs:
yr_config["endyear"] = kwargs.pop("endyear")
if yr_config:
self.set_years(**yr_config)
# should be able to do some other nice metadata stuff re how magicc was run
# etc. here
kwargs.setdefault("rundate", get_date_time_string())
self.update_config(**kwargs)
self.check_config()
exec_dir = basename(self.original_dir)
command = [join(self.root_dir, exec_dir, self.binary_name)]
if self.version >= 7:
if debug == "verbose":
command.append("--verbose")
elif debug:
command.append("--debug")
elif debug:
raise ValueError("MAGICC6 has no debug capability")
if not IS_WINDOWS and self.binary_name.endswith(".exe"): # pragma: no cover
if not _wine_installed:
raise WineNotInstalledError(
"Wine is not installed but is required to run `.exe` binaries"
)
command.insert(0, "wine")
try:
res = subprocess.run( # nosec # on Windows shell=True is required
command,
check=True,
# thank you https://stackoverflow.com/a/53209196 for Python 3.6 hack
stdout=PIPE,
stderr=PIPE,
cwd=self.run_dir,
shell=IS_WINDOWS,
)
except subprocess.CalledProcessError as exc:
print("stderr:\n{}".format(exc.stderr.decode()))
raise exc
outfiles = self._get_output_filenames()
read_cols = {"climate_model": ["MAGICC{}".format(self.version)]}
if scenario is not None:
read_cols["model"] = scenario["model"].unique().tolist()
read_cols["scenario"] = scenario["scenario"].unique().tolist()
else:
read_cols.setdefault("model", ["unspecified"])
read_cols.setdefault("scenario", ["unspecified"])
mdata = []
for filepath in outfiles:
if filepath.startswith("DAT_VOLCANIC_RF.") or "SUBANN" in filepath:
warnings.warn(
"Not reading file: {}. Monthly data are not read in automatically by `run`. "
"Use `MAGICCData` instead.".format(filepath)
)
continue
try:
openscm_var = _get_openscm_var_from_filepath(filepath)
if only is None or openscm_var in only:
tempdata = MAGICCData(
join(self.out_dir, filepath), columns=deepcopy(read_cols)
)
mdata.append(tempdata)
except (NoReaderWriterError, InvalidTemporalResError):
# TODO: something like warnings.warn("Could not read {}".format(filepath))
continue
if not mdata and only is not None:
raise ValueError("No output found for only={}".format(only))
if not mdata:
if self.strict:
raise ValueError("No output found. Check configuration")
else:
# No data was loaded return an empty MAGICCData object
mdata = MAGICCData(
data={},
columns={
"model": [],
"unit": [],
"variable": [],
"region": [],
"scenario": [],
},
)
else:
mdata = run_append(mdata)
try:
run_paras = self.read_parameters()
self.config = run_paras
mdata.metadata["parameters"] = run_paras
except FileNotFoundError:
pass
mdata.metadata["stderr"] = res.stderr.decode("ascii")
levels_to_warn = ["WARNING", "ERROR", "FATAL"]
for level in levels_to_warn:
if level in mdata.metadata["stderr"]:
warnings.warn(
"magicc logged a {} message. Check the 'stderr' key of the "
"result's `metadata` attribute.".format(level)
)
return mdata
def _get_output_filenames(self):
outfiles = [f for f in listdir(self.out_dir) if f != "PARAMETERS.OUT"]
bin_out = [
f.split(".")[0]
for f in outfiles
if f.startswith("DAT_") and f.endswith(".BINOUT")
]
extras = []
for f in outfiles:
var_name, ext = f.split(".")
if ext != "BINOUT" and var_name not in bin_out:
extras.append(f)
return [f + ".BINOUT" for f in bin_out] + extras
def _check_failed(self, msg):
if self.strict:
raise ValueError(msg)
else:
warnings.warn(msg)
def check_config(self):
"""Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC
For further detail about why this is required, please see :ref:`MAGICC flags`.
Raises
------
ValueError
If we are not certain that the config written by PYMAGICC will overwrite
all other config i.e. that there will be no unexpected behaviour. A
ValueError will also be raised if the user tries to use more than one
scenario file.
"""
cfg_error_msg = (
"PYMAGICC is not the only tuning model that will be used by "
"`MAGCFG_USER.CFG`: your run is likely to fail/do odd things"
)
emisscen_error_msg = (
"You have more than one `FILE_EMISSCEN_X` flag set. Using more than "
"one emissions scenario is hard to debug and unnecessary with "
"Pymagicc's Dataframe scenario input. Please combine all your "
"scenarios into one Dataframe with Pymagicc and Pandas, then feed "
"this single Dataframe into Pymagicc's run API."
)
nml_to_check = "nml_allcfgs"
usr_cfg = read_cfg_file(join(self.run_dir, "MAGCFG_USER.CFG"))
for k in usr_cfg[nml_to_check]:
if k.startswith("file_tuningmodel"):
first_tuningmodel = k in ["file_tuningmodel", "file_tuningmodel_1"]
if first_tuningmodel:
if usr_cfg[nml_to_check][k] != "PYMAGICC":
self._check_failed(cfg_error_msg)
elif usr_cfg[nml_to_check][k] not in ["USER", ""]:
self._check_failed(cfg_error_msg)
elif k.startswith("file_emisscen_"):
if usr_cfg[nml_to_check][k] not in ["NONE", ""]:
self._check_failed(emisscen_error_msg)
self._check_config()
def write(self, mdata, name):
"""Write an input file to disk
Parameters
----------
mdata : :obj:`pymagicc.io.MAGICCData`
A MAGICCData instance with the data to write
name : str
The name of the file to write. The file will be written to the MAGICC
instance's run directory i.e. ``self.run_dir``
"""
mdata.write(join(self.run_dir, name), self.version)
def read_parameters(self):
"""
Read a parameters.out file
Returns
-------
dict
A dictionary containing all the configuration used by MAGICC
"""
param_fname = join(self.out_dir, "PARAMETERS.OUT")
if not exists(param_fname):
raise FileNotFoundError("No PARAMETERS.OUT found")
with open(param_fname) as nml_file:
parameters = dict(f90nml.read(nml_file))
for group in ["nml_years", "nml_allcfgs", "nml_outputcfgs"]:
parameters[group] = dict(parameters[group])
for k, v in parameters[group].items():
parameters[group][k] = _clean_value(v)
parameters[group.replace("nml_", "")] = parameters.pop(group)
self.config = parameters
return parameters
def remove_temp_copy(self):
"""
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
"""
if self.is_temp and self.root_dir is not None:
shutil.rmtree(self.root_dir)
self.root_dir = None
def set_config(
self,
filename="MAGTUNE_PYMAGICC.CFG",
top_level_key="nml_allcfgs",
conflict="warn",
**kwargs,
):
"""
Create a configuration file for MAGICC.
Writes a fortran namelist in run_dir.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
conflict : {'warn', 'ignore'}
If 'warn', when a flag needs to be replaced by a different name (because,
for example, the flag name changed between MAGICC versions), a warning is
raised. If 'ignore', no warning is raised when a replacement is required.
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
kwargs = self._format_config(kwargs)
fname = join(self.run_dir, filename)
conf = {top_level_key: kwargs}
conf = self._fix_legacy_keys(conf, conflict=conflict)
f90nml.write(conf, fname, force=True)
return conf
def update_config(
self,
filename="MAGTUNE_PYMAGICC.CFG",
top_level_key="nml_allcfgs",
conflict="warn",
**kwargs,
):
"""Updates a configuration file for MAGICC
Updates the contents of a fortran namelist in the run directory,
creating a new namelist if none exists.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
conflict : {'warn', 'ignore'}
If 'warn', when a flag needs to be replaced by a different name (because,
for example, the flag name changed between MAGICC versions), a warning is
raised. If 'ignore', no warning is raised when a replacement is required.
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
kwargs = self._format_config(kwargs)
fname = join(self.run_dir, filename)
if exists(fname):
conf = f90nml.read(fname)
else:
conf = {top_level_key: {}}
conf[top_level_key].update(kwargs)
conf = self._fix_legacy_keys(conf, conflict=conflict)
f90nml.write(conf, fname, force=True)
return conf
def _fix_legacy_keys(self, conf, conflict="warn"):
"""
Go through config and fix any keys which are misnamed.
For example, fix any keys which have been renamed between MAGICC versions to
match the new names.
Parameters
----------
conf :obj:`f90nml.Namelist`
Configuration to check
conflict : {'warn', 'ignore'}
If 'warn', when a conflict is found, a warning is raised. If 'ignore', no
warning is raised when a conflict is found.
Returns
-------
:obj:`f90nml.Namelist`
Configuration with updated keys
Warning
-------
If a key is renamed, a warning is raised
Raises
------
ValueError
An invalid value for ``conflict`` is supplied
"""
valid_conflicts = ["warn", "ignore"]
if conflict not in valid_conflicts:
raise ValueError("`conflict` must be one of: {}".format(valid_conflicts))
cfg_key = "nml_allcfgs"
if cfg_key not in conf:
return conf
new_conf = deepcopy(conf)
for wrong_key, right_key in self._config_renamings.items():
if wrong_key in new_conf[cfg_key]:
new_conf[cfg_key][right_key] = new_conf[cfg_key].pop(wrong_key)
if conflict == "warn":
warnings.warn(
"Altering config flag {} to {}".format(wrong_key, right_key)
)
return new_conf
def set_zero_config(self):
"""Set config such that radiative forcing and temperature output will be zero
This method is intended as a convenience only, it does not handle everything in
an obvious way. Adjusting the parameter settings still requires great care and
may behave unepexctedly.
"""
# zero_emissions is imported from scenarios module
# TODO: setup MAGICC6 so it puts extra variables in right place and hence
# warning about ignoring some data disappears
zero_emissions.write(join(self.run_dir, self._scen_file_name), self.version)
time = zero_emissions.filter(variable="Emissions|CH4", region="World")[
"time"
].values
no_timesteps = len(time)
# value doesn't actually matter as calculations are done from difference but
# chose sensible value nonetheless
co2_conc_pi = 722
co2_conc = co2_conc_pi * np.ones(no_timesteps)
co2_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CO2",
"unit": "ppm",
"todo": "SET",
"region": "World",
"value": co2_conc,
}
)
co2_conc_writer = MAGICCData(co2_conc_df)
co2_conc_filename = "HIST_CONSTANT_CO2_CONC.IN"
co2_conc_writer.metadata = {
"header": "Constant pre-industrial CO2 concentrations"
}
co2_conc_writer.write(join(self.run_dir, co2_conc_filename), self.version)
ch4_conc_pi = 722
ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
ch4_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CH4",
"unit": "ppb",
"todo": "SET",
"region": "World",
"value": ch4_conc,
}
)
ch4_conc_writer = MAGICCData(ch4_conc_df)
ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
ch4_conc_writer.metadata = {
"header": "Constant pre-industrial CH4 concentrations"
}
ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename), self.version)
fgas_conc_pi = 0
fgas_conc = fgas_conc_pi * np.ones(no_timesteps)
varname = "FGAS_CONC"
fgas_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": varname,
"unit": "ppt",
"todo": "SET",
"region": "World",
"value": fgas_conc,
}
)
fgas_conc_writer = MAGICCData(fgas_conc_df)
fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
fgas_conc_writer.metadata = {"header": "Zero concentrations"}
fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename), self.version)
def_config = self.default_config
tmp_nml = f90nml.Namelist({"nml_allcfgs": {"fgas_files_conc": 1}})
fgas_files_conc_flag = list(
self._fix_legacy_keys(tmp_nml, conflict="ignore")["nml_allcfgs"].keys()
)[0]
fgas_conc_files = [fgas_conc_filename] * len(
def_config["nml_allcfgs"][fgas_files_conc_flag]
)
self.set_config(
conflict="ignore",
file_emisscen=self._scen_file_name,
rf_initialization_method="ZEROSTARTSHIFT",
rf_total_constantafteryr=10000,
file_co2i_emis="",
file_co2b_emis="",
file_co2_conc=co2_conc_filename,
co2_switchfromconc2emis_year=10000,
file_ch4i_emis="",
file_ch4b_emis="",
file_ch4n_emis="",
file_ch4_conc=ch4_conc_filename,
ch4_switchfromconc2emis_year=10000,
file_n2oi_emis="",
file_n2ob_emis="",
file_n2on_emis="",
file_n2o_conc="",
n2o_switchfromconc2emis_year=1750,
file_noxi_emis="",
file_noxb_emis="",
file_noxi_ot="",
file_noxb_ot="",
file_noxt_rf="",
file_soxnb_ot="",
file_soxi_ot="",
file_soxt_rf="",
file_soxi_emis="",
file_soxb_emis="",
file_soxn_emis="",
file_oci_emis="",
file_ocb_emis="",
file_oci_ot="",
file_ocb_ot="",
file_oci_rf="",
file_ocb_rf="",
file_bci_emis="",
file_bcb_emis="",
file_bci_ot="",
file_bcb_ot="",
file_bci_rf="",
file_bcb_rf="",
bcoc_switchfromrf2emis_year=1750,
file_nh3i_emis="",
file_nh3b_emis="",
file_nmvoci_emis="",
file_nmvocb_emis="",
file_coi_emis="",
file_cob_emis="",
file_mineraldust_rf="",
file_landuse_rf="",
file_bcsnow_rf="",
# rf_fgassum_scale=0, # this appears to do nothing, hence the next two lines
fgas_switchfromconc2emis_year=10000,
rf_mhalosum_scale=0,
stratoz_o3scale=0,
rf_volcanic_scale=0,
rf_solar_scale=0,
mhalo_switchfromconc2emis_year=1750,
fgas_files_conc=fgas_conc_files,
)
def _format_config(self, config_dict):
# config_dict = self._fix_any_backwards_emissions_scen_key_in_config(config_dict)
config_dict = self._convert_out_config_flags_to_integers(config_dict)
return config_dict
def _convert_out_config_flags_to_integers(self, config_dict):
valid_out_flags = [
"out_emissions",
"out_gwpemissions",
"out_sum_gwpemissions",
"out_concentrations",
"out_carboncycle",
"out_forcing",
"out_forcing_subannual",
"out_temperature",
"out_temperature_subannual",
"out_sealevel",
"out_parameters",
"out_misc",
"out_lifetimes",
"out_timeseriesmix",
"out_rcpdata",
"out_summaryidx",
"out_tempoceanlayers",
"out_oceanarea",
"out_heatuptake",
"out_warnings",
"out_precipinput",
"out_aogcmtuning",
"out_ccycletuning",
"out_observationaltuning",
"out_keydata_1",
"out_keydata_2",
"out_inverseemis",
"out_surfaceforcing",
"out_permafrost",
"out_allowanydynamicvars",
]
for key in valid_out_flags:
if key in config_dict:
# MAGICC expects 1 and 0 instead of True/False
config_dict[key] = 1 if config_dict[key] else 0
return config_dict
def set_years(self, startyear=1765, endyear=2100):
"""
Set the start and end dates of the simulations.
Parameters
----------
startyear : int
Start year of the simulation
endyear : int
End year of the simulation
Returns
-------
dict
The contents of the namelist
"""
# TODO: test altering stepsperyear, I think 1, 2 and 24 should all work
return self.set_config(
"MAGCFG_NMLYEARS.CFG",
"nml_years",
endyear=endyear,
startyear=startyear,
stepsperyear=12,
)
def set_output_variables(self, write_ascii=True, write_binary=False, **kwargs):
"""Set the output configuration, minimising output as much as possible
There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all output
by setting all of MAGICC's ``out_xx`` flags to ``0``.
This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided by
this function.
Parameters
----------
write_ascii : bool
If true, MAGICC is configured to write output files as human readable ascii files.
write_binary : bool
If true, MAGICC is configured to write binary output files. These files are much faster
to process and write, but are not human readable.
**kwargs:
List of variables to write out. A list of possible options are as follows. This
may not be a complete list.
'emissions',
'gwpemissions',
'sum_gwpemissions',
'concentrations',
'carboncycle',
'forcing',
'surfaceforcing',
'permafrost',
'temperature',
'sealevel',
'parameters',
'misc',
'lifetimes',
'timeseriesmix',
'rcpdata',
'summaryidx',
'inverseemis',
'tempoceanlayers',
'oceanarea',
'heatuptake',
'warnings',
'precipinput',
'aogcmtuning',
'ccycletuning',
'observationaltuning',
'keydata_1',
'keydata_2'
"""
if not (write_ascii or write_binary):
raise AssertionError("write_binary and/or write_ascii must be configured")
if write_binary and write_ascii:
ascii_binary = "BOTH"
elif write_ascii:
ascii_binary = "ASCII"
else:
ascii_binary = "BINARY"
# defaults
outconfig = {
"out_emissions": 0,
"out_gwpemissions": 0,
"out_sum_gwpemissions": 0,
"out_concentrations": 0,
"out_carboncycle": 0,
"out_forcing": 0,
"out_surfaceforcing": 0,
"out_permafrost": 0,
"out_temperature": 0,
"out_sealevel": 0,
"out_parameters": 0,
"out_misc": 0,
"out_timeseriesmix": 0,
"out_rcpdata": 0,
"out_summaryidx": 0,
"out_inverseemis": 0,
"out_tempoceanlayers": 0,
"out_heatuptake": 0,
"out_ascii_binary": ascii_binary,
"out_warnings": 0,
"out_precipinput": 0,
"out_aogcmtuning": 0,
"out_ccycletuning": 0,
"out_observationaltuning": 0,
"out_keydata_1": 0,
"out_keydata_2": 0,
}
if self.version == 7:
outconfig["out_oceanarea"] = 0
outconfig["out_lifetimes"] = 0
for kw in kwargs:
val = 1 if kwargs[kw] else 0 # convert values to 0/1 instead of booleans
outconfig["out_" + kw.lower()] = val
self.update_config(**outconfig)
def get_executable(self):
"""
Get path to MAGICC executable being used
Returns
-------
str
Path to MAGICC executable being used
"""
return config["executable_{}".format(self.version)]
def diagnose_tcr_ecs_tcre(self, **kwargs):
"""
Diagnose TCR, ECS and TCRE
The transient climate response (TCR), is the global-mean temperature response
per unit cumulative |CO2| emissions at the time at which atmospheric |CO2|
concentrations double in an experiment where atmospheric |CO2| concentrations
are increased at 1% per year from pre-industrial levels (1pctCO2 experiment).
The equilibrium climate sensitivity (ECS), is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations (abrupt-2xCO2 experiment).
The transient climate response to emissions (TCRE), is the global-mean
temperature response per unit cumulative |CO2| emissions at the time at which
atmospheric |CO2| concentrations double in the 1pctCO2 experiment.
Please note that sometimes the run length won't be long enough to allow
MAGICC's oceans to fully equilibrate and hence the ECS value might not be what
you expect (it should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
TCR; "tcre" - the diagnosed TCRE; "timeseries" - the relevant model input
and output timeseries used in the experiment i.e. atmospheric |CO2|
concentrations, inverse |CO2| emissions, total radiative forcing and
global-mean surface temperature
"""
ecs_res = self.diagnose_ecs(**kwargs)
tcr_tcre_res = self.diagnose_tcr_tcre(**kwargs)
out = {**ecs_res, **tcr_tcre_res}
out["timeseries"] = run_append(
[ecs_res["timeseries"], tcr_tcre_res["timeseries"]]
)
return out
def diagnose_ecs(self, **kwargs):
"""
Diagnose ECS
The equilibrium climate sensitivity (ECS), is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations (abrupt-2xCO2 experiment).
Please note that sometimes the run length won't be long enough to allow
MAGICC's oceans to fully equilibrate and hence the ECS value might not be what
you expect (it should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "timeseries" - the
relevant model input and output timeseries used in the experiment i.e.
atmospheric |CO2| concentrations, inverse |CO2| emissions, total radiative
forcing and global-mean surface temperature
"""
self._diagnose_ecs_config_setup(**kwargs)
timeseries = self.run(
scenario=None,
only=[
"Atmospheric Concentrations|CO2",
"Radiative Forcing",
"Surface Temperature",
],
)
timeseries["scenario"] = "abrupt-2xCO2"
ecs = self.get_ecs_from_diagnosis_results(timeseries)
return {"ecs": ecs, "timeseries": timeseries}
def diagnose_tcr_tcre(self, **kwargs):
"""
Diagnose TCR and TCRE
The transient climate response (TCR), is the global-mean temperature response
per unit cumulative |CO2| emissions at the time at which atmospheric |CO2|
concentrations double in an experiment where atmospheric |CO2| concentrations
are increased at 1% per year from pre-industrial levels (1pctCO2 experiment).
The transient climate response to emissions (TCRE), is the global-mean
temperature response per unit cumulative |CO2| emissions at the time at which
atmospheric |CO2| concentrations double in the 1pctCO2 experiment.
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "tcr" - the diagnosed TCR; "tcre" - the diagnosed
TCRE; "timeseries" - the relevant model input and output timeseries used
in the experiment i.e. atmospheric |CO2| concentrations, inverse |CO2|
emissions, total radiative forcing and global-mean surface temperature
"""
self._diagnose_tcr_tcre_config_setup(**kwargs)
timeseries = self.run(
scenario=None,
only=[
"Atmospheric Concentrations|CO2",
"INVERSEEMIS",
"Radiative Forcing",
"Surface Temperature",
],
)
# drop all the irrelevant inverse emissions
timeseries = timeseries.filter(
variable="Inverse Emissions*", level=1, keep=False
)
# drop the final year as concs stay constant for some reason,
# MAGICC bug...
timeseries = timeseries.filter(time=timeseries["time"].max(), keep=False)
timeseries["scenario"] = "1pctCO2"
tcr, tcre = self.get_tcr_tcre_from_diagnosis_results(timeseries)
return {"tcr": tcr, "tcre": tcre, "timeseries": timeseries}
def _diagnose_ecs_config_setup(self, **kwargs):
self.set_years(
startyear=1750, endyear=4200
) # 4200 seems to be the max I can push too without an error
self.update_config(
FILE_CO2_CONC="ABRUPT2XCO2_CO2_CONC.IN",
CO2_SWITCHFROMCONC2EMIS_YEAR=30000,
RF_TOTAL_RUNMODUS="CO2",
RF_TOTAL_CONSTANTAFTERYR=2000,
**kwargs,
)
def _diagnose_tcr_tcre_config_setup(self, **kwargs):
self.set_years(startyear=1750, endyear=2020)
self.update_config(
FILE_CO2_CONC="1PCTCO2_CO2_CONC.IN",
CO2_SWITCHFROMCONC2EMIS_YEAR=30000,
RF_TOTAL_RUNMODUS="CO2",
RF_TOTAL_CONSTANTAFTERYR=3000,
OUT_INVERSEEMIS=1,
**kwargs,
)
def get_ecs_from_diagnosis_results(self, results_ecs_run):
"""
Diagnose ECS from the results of the abrupt-2xCO2 experiment
Parameters
----------
results_ecs_run : :obj:`ScmRun`
Results of the abrupt-2xCO2 experiment, must contain atmospheric |CO2|
concentrations, total radiative forcing and surface temperature.
Returns
-------
ecs : :obj:`pint.quantity.Quantity`
ECS diagnosed from ``results_ecs_run``
"""
global_co2_concs = results_ecs_run.filter(
variable="Atmospheric Concentrations|CO2", region="World"
)
ecs_time, ecs_start_time = self._get_ecs_ecs_start_yr_from_CO2_concs(
global_co2_concs
)
global_total_rf = results_ecs_run.filter(
variable="Radiative Forcing", region="World"
)
self._check_ecs_total_RF(global_total_rf, jump_time=ecs_start_time)
global_temp = results_ecs_run.filter(
variable="Surface Temperature", region="World"
)
self._check_ecs_temp(global_temp)
ecs = float(global_temp.filter(time=ecs_time).values.squeeze())
unit = global_temp.get_unique_meta("unit", no_duplicates=True)
ecs = ecs * unit_registry(unit)
return ecs
def get_tcr_tcre_from_diagnosis_results(self, results_tcr_tcre_run):
"""
Diagnose TCR and TCRE from the results of the 1pctCO2 experiment
Parameters
----------
results_tcr_tcre_run : :obj:`ScmRun`
Results of the 1pctCO2 experiment, must contain atmospheric |CO2|
concentrations, inverse |CO2| emissions, total radiative forcing and
surface temperature.
Returns
-------
tcr, tcre : :obj:`pint.quantity.Quantity`, :obj:`pint.quantity.Quantity`
TCR and TCRE diagnosed from ``results_tcr_tcre_run``
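        Examples
        --------
        Usually reached via ``diagnose_tcr_tcre``; a direct, hypothetical call
        (assuming a configured instance named ``magicc``) would be::
            >>> diag = magicc.diagnose_tcr_tcre()
            >>> tcr, tcre = magicc.get_tcr_tcre_from_diagnosis_results(diag["timeseries"])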
"""
global_co2_concs = results_tcr_tcre_run.filter(
variable="Atmospheric Concentrations|CO2", region="World"
)
(tcr_time, tcr_start_time,) = self._get_tcr_tcr_start_yr_from_CO2_concs(
global_co2_concs
)
if tcr_time.year != tcr_start_time.year + 70: # pragma: no cover # emergency
raise AssertionError("Has the definition of TCR and TCRE changed?")
global_inverse_co2_emms = results_tcr_tcre_run.filter(
variable="Inverse Emissions|CO2|MAGICC Fossil and Industrial",
region="World",
)
global_total_rf = results_tcr_tcre_run.filter(
variable="Radiative Forcing", region="World"
)
self._check_tcr_tcre_total_RF(global_total_rf, tcr_time=tcr_time)
global_temp = results_tcr_tcre_run.filter(
variable="Surface Temperature", region="World"
)
self._check_tcr_tcre_temp(global_temp)
tcr = float(global_temp.filter(time=tcr_time).values.squeeze())
tcr_unit = global_temp.get_unique_meta("unit", no_duplicates=True)
tcr = tcr * unit_registry(tcr_unit)
tcre_cumulative_emms = float(
global_inverse_co2_emms.filter(
year=range(tcr_start_time.year, tcr_time.year)
).values.sum()
)
emms_unit = global_inverse_co2_emms.get_unique_meta("unit", no_duplicates=True)
years = global_inverse_co2_emms["year"].values.squeeze()
if not np.all((years[1:] - years[:-1]) == 1): # pragma: no cover
raise AssertionError(
"TCR/TCRE diagnosis assumed to be on annual timestep. Please "
"raise an issue at "
"https://github.com/openscm/pymagicc/issues to discuss "
"your use case"
)
# can now safely assume that our simple sum has done the right thing
tcre_cumulative_emms_unit = unit_registry(emms_unit) * unit_registry("yr")
tcre_cumulative_emms = tcre_cumulative_emms * tcre_cumulative_emms_unit
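        # tcr is a temperature (e.g. K) while tcre_cumulative_emms carries
        # cumulative-emissions units (e.g. GtC), so the ratio below ends up in e.g. K / GtC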
tcre = tcr / tcre_cumulative_emms
return tcr, tcre
def _get_ecs_ecs_start_yr_from_CO2_concs(self, df_co2_concs):
co2_concs = df_co2_concs.timeseries()
co2_conc_0 = co2_concs.iloc[0, 0]
t_start = co2_concs.columns.min()
t_end = co2_concs.columns.max()
ecs_start_time = co2_concs.iloc[
:, co2_concs.values.squeeze() > co2_conc_0
].columns[0]
spin_up_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: t_start <= x < ecs_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_co2_concs == co2_conc_0).all():
raise ValueError(
"The ECS CO2 concs look wrong, they are not constant before they start rising"
)
co2_conc_final = 2 * co2_conc_0
eqm_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: ecs_start_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
if not np.isclose(eqm_co2_concs, co2_conc_final).all():
raise ValueError(
"The ECS CO2 concs look wrong, they are not constant after doubling"
)
ecs_time = df_co2_concs["time"].iloc[-1]
return ecs_time, ecs_start_time
def _get_tcr_tcr_start_yr_from_CO2_concs(self, df_co2_concs):
co2_concs = df_co2_concs.timeseries()
co2_conc_0 = co2_concs.iloc[0, 0]
t_start = co2_concs.columns.min()
t_end = co2_concs.columns.max()
tcr_start_time = co2_concs.iloc[
:, co2_concs.values.squeeze() > co2_conc_0
].columns[0] - relativedelta(years=1)
tcr_time = tcr_start_time + relativedelta(years=70)
spin_up_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: t_start <= x <= tcr_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_co2_concs == co2_conc_0).all():
raise ValueError(
"The TCR/TCRE CO2 concs look wrong, they are not constant before they start rising"
)
actual_rise_co2_concs = (
_filter_time_range(df_co2_concs, lambda x: tcr_start_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
# this will blow up if we switch to diagnose tcr/ecs with a monthly run...
expected_rise_co2_concs = co2_conc_0 * 1.01 ** np.arange(
len(actual_rise_co2_concs)
)
rise_co2_concs_correct = np.isclose(
actual_rise_co2_concs, expected_rise_co2_concs
).all()
if not rise_co2_concs_correct:
raise ValueError("The TCR/TCRE CO2 concs look wrong during the rise period")
return tcr_time, tcr_start_time
def _check_ecs_total_RF(self, df_total_rf, jump_time):
total_rf = df_total_rf.timeseries()
total_rf_max = total_rf.values.squeeze().max()
t_start = total_rf.columns.min()
t_end = total_rf.columns.max()
spin_up_rf = (
_filter_time_range(df_total_rf, lambda x: t_start <= x < jump_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_rf == 0).all():
raise ValueError(
"The ECS total radiative forcing looks wrong, it is not all zero before concentrations start rising"
)
eqm_rf = (
_filter_time_range(df_total_rf, lambda x: jump_time <= x <= t_end)
.timeseries()
.values.squeeze()
)
if not (eqm_rf == total_rf_max).all():
raise ValueError(
"The ECS total radiative forcing looks wrong, it is not constant after concentrations double"
)
def _check_tcr_tcre_total_RF(self, df_total_rf, tcr_time):
total_rf = df_total_rf.timeseries()
t_start = total_rf.columns.min()
tcr_start_time = tcr_time - relativedelta(years=70)
spin_up_rf = (
_filter_time_range(df_total_rf, lambda x: t_start <= x <= tcr_start_time)
.timeseries()
.values.squeeze()
)
if not (spin_up_rf == 0).all():
raise ValueError(
"The TCR/TCRE total radiative forcing looks wrong, it is not all zero before concentrations start rising"
)
rf_vls = total_rf.values.squeeze()
rf_minus_previous_yr = rf_vls[1:] - rf_vls[:-1]
if not np.all(rf_minus_previous_yr >= 0):
raise ValueError(
"The TCR/TCRE total radiative forcing looks wrong, it is not rising after concentrations start rising"
)
def _check_ecs_temp(self, df_temp):
self._check_tcr_ecs_tcre_temp(
df_temp, "The ECS surface temperature looks wrong, it decreases"
)
def _check_tcr_tcre_temp(self, df_temp):
self._check_tcr_ecs_tcre_temp(
df_temp, "The TCR/TCRE surface temperature looks wrong, it decreases"
)
def _check_tcr_ecs_tcre_temp(self, df_temp, message):
tmp_vls = df_temp.timeseries().values.squeeze()
tmp_minus_previous_yr = tmp_vls[1:] - tmp_vls[:-1]
if not np.all(tmp_minus_previous_yr >= 0):
raise ValueError(message)
def set_emission_scenario_setup(self, scenario, config_dict):
"""Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
"""
self.write(scenario, self._scen_file_name)
emis_flag = list(
self._fix_legacy_keys(
f90nml.Namelist({"nml_allcfgs": {"file_emisscen": "junk"}}),
conflict="ignore",
)["nml_allcfgs"].keys()
)[0]
config_dict[emis_flag] = self._scen_file_name
return config_dict
def _check_config(self):
"""
Check config above and beyond those checked by ``self.check_config``
"""
pass
class MAGICC6(MAGICCBase):
version = 6
_scen_file_name = "SCENARIO.SCEN"
_config_renamings = {
"file_emisscen": "file_emissionscenario",
"fgas_files_conc": "file_fgas_conc",
"mhalo_switchfromconc2emis_year": "mhalo_switch_conc2emis_yr",
}
@property
def default_config(self):
"""
Default configuration to use in a run
"""
base = f90nml.read(join(self.run_dir, "MAGCFG_DEFAULTALL_69.CFG"))
user = f90nml.read(join(self.run_dir, "MAGCFG_USER.CFG"))
self._default_config = deepcopy(base)
self._default_config.update(user)
return self._default_config
def _check_tcr_ecs_tcre_total_RF(self, df_total_rf, tcr_time, ecs_time):
super()._check_tcr_ecs_tcre_total_RF(df_total_rf, tcr_time, ecs_time)
        # we can be more careful with the checks here because MAGICC6 only has
        # logarithmic CO2 forcing, i.e. a linear rise in forcing during the
        # 1pctCO2 concentration ramp
total_rf = df_total_rf.timeseries()
total_rf_max = total_rf.values.squeeze().max()
tcre_start_time = tcr_time - relativedelta(years=70)
actual_rise_rf = (
_filter_time_range(df_total_rf, lambda x: tcre_start_time <= x <= tcr_time)
.timeseries()
.values.squeeze()
)
# this will blow up if we switch to diagnose tcr/ecs with a monthly run...
expected_rise_rf = total_rf_max / 70.0 * np.arange(71)
rise_rf_correct = np.isclose(actual_rise_rf, expected_rise_rf).all()
if not rise_rf_correct:
raise ValueError(
"The TCR/ECS/TCRE total radiative forcing looks wrong during the rise period"
)
def _check_config(self):
cfg = self.update_config()
if "file_emissionscenario" in cfg["nml_allcfgs"]:
if cfg["nml_allcfgs"]["file_emissionscenario"].endswith("SCEN7"):
self._check_failed("MAGICC6 cannot run SCEN7 files")
class MAGICC7(MAGICCBase):
version = 7
_config_renamings = {
"file_emissionscenario": "file_emisscen",
"file_fgas_conc": "fgas_files_conc",
"mhalo_switch_conc2emis_yr": "mhalo_switchfromconc2emis_year",
}
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
This will also overwrite the value of all ``file_tuningmodel_x`` flags to
ensure that Pymagicc's configurations will be read. If ``self.strict``, this
will also overwrite the value of all ``file_emisscen_x`` flags to ensure that
only Pymagicc's scenario input is used. This overwrite behaviour can be
removed once the MAGICC7 binary is publicly released as we can then create a
Pymagicc specific MAGCFG_USER.CFG rather than relying on whatever is in the
user's current copy.
"""
super(MAGICC7, self).create_copy()
self.update_config(
"MAGCFG_USER.CFG",
**{
"file_tuningmodel_1": "PYMAGICC",
"file_tuningmodel_2": "USER",
"file_tuningmodel_3": "USER",
"file_tuningmodel_4": "USER",
"file_tuningmodel_5": "USER",
"file_tuningmodel_6": "USER",
"file_tuningmodel_7": "USER",
"file_tuningmodel_8": "USER",
"file_tuningmodel_9": "USER",
"file_tuningmodel_10": "USER",
},
)
if self.strict:
self.update_config(
"MAGCFG_USER.CFG",
**{
"file_emisscen_2": "NONE",
"file_emisscen_3": "NONE",
"file_emisscen_4": "NONE",
"file_emisscen_5": "NONE",
"file_emisscen_6": "NONE",
"file_emisscen_7": "NONE",
"file_emisscen_8": "NONE",
},
)
def _diagnose_tcr_ecs_tcre_config_setup(self, **kwargs):
super()._diagnose_tcr_ecs_tcre_config_setup(**kwargs)
# also need to lock CH4 and N2O in case OLBL forcing mode is being used
self.update_config(
FILE_CH4_CONC="TCRECS_CH4_CONC.IN",
CH4_SWITCHFROMCONC2EMIS_YEAR=30000,
FILE_N2O_CONC="TCRECS_N2O_CONC.IN",
N2O_SWITCHFROMCONC2EMIS_YEAR=30000,
)
def _check_config(self):
pass
def _filter_time_range(scmdf, filter_func):
# TODO: move into openscm
tdf = scmdf.timeseries()
tdf = tdf.iloc[:, tdf.columns.map(filter_func)]
return MAGICCData(tdf)
| agpl-3.0 |
kyleabeauchamp/HMCNotes | code/old/test_ghmc_repartition.py | 1 | 1292 | import lb_loader
import numpy as np
import pandas as pd
import simtk.openmm as mm
from simtk.openmm import app
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
pd.set_option('display.width', 1000)
n_steps = 3000
temperature = 300. * u.kelvin
for hydrogenMass in [1.0, 2.0, 3.0, 3.5]:
hydrogenMass = hydrogenMass * u.amu
#system, positions = lb_loader.load_lb(hydrogenMass=hydrogenMass)
testsystem = testsystems.DHFRExplicit(hydrogenMass=hydrogenMass)
system, positions = testsystem.system, testsystem.positions
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, 0.25 * u.femtoseconds)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(10000)
positions = context.getState(getPositions=True).getPositions()
collision_rate = 1.0 / u.picoseconds
timestep = 1.5 * u.femtoseconds
steps_per_hmc = 12
k_max = 3
integrator = hmc_integrators.GHMC2(temperature, steps_per_hmc, timestep)
context = mm.Context(system, integrator)
context.setPositions(positions)
context.setVelocitiesToTemperature(temperature)
integrator.step(1)
integrator.step(2500)
data = integrator.vstep(5)
| gpl-2.0 |
ELind77/gensim | gensim/sklearn_integration/sklearn_wrapper_gensim_lsimodel.py | 1 | 4027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
from gensim.sklearn_integration import BaseSklearnWrapper
class SklLsiModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base LSI module
"""
def __init__(self, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, onepass=True, power_iters=2, extra_samples=100):
"""
        Sklearn wrapper for the LSI model. Class derived from gensim.models.LsiModel.
"""
self.gensim_model = None
self.num_topics = num_topics
self.id2word = id2word
self.chunksize = chunksize
self.decay = decay
self.onepass = onepass
self.extra_samples = extra_samples
self.power_iters = power_iters
def get_params(self, deep=True):
"""
Returns all parameters as dictionary.
"""
return {"num_topics": self.num_topics, "id2word": self.id2word,
"chunksize": self.chunksize, "decay": self.decay, "onepass": self.onepass,
"extra_samples": self.extra_samples, "power_iters": self.power_iters}
def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklLsiModel, self).set_params(**parameters)
return self
def fit(self, X, y=None):
"""
Fit the model according to the given training data.
Calls gensim.models.LsiModel
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(X)
else:
corpus = X
self.gensim_model = models.LsiModel(corpus=corpus, num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
decay=self.decay, onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples)
return self
def transform(self, docs):
"""
Takes a list of documents as input ('docs').
        Returns a matrix of topic distributions for the given documents, where
        entry (i, j) is the weight of topic j in document i.
The input `docs` should be in BOW format and can be a list of documents like : [ [(4, 1), (7, 1)], [(9, 1), (13, 1)], [(2, 1), (6, 1)] ]
or a single document like : [(4, 1), (7, 1)]
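        Example (illustrative only, using a tiny made-up BOW corpus)::
            >>> corpus = [[(0, 1), (1, 2)], [(1, 1), (2, 3)]]
            >>> lsi = SklLsiModel(num_topics=2).fit(corpus)
            >>> lsi.transform([(0, 1), (2, 1)]).shape
            (1, 2)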
"""
if self.gensim_model is None:
raise NotFittedError("This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.")
        # If a single BOW document (a list of tuples) was passed, wrap it in a list
        if isinstance(docs[0], tuple):
            docs = [docs]
        X = [[] for _ in range(len(docs))]
        for k, v in enumerate(docs):
            doc_topics = self.gensim_model[v]
            probs_docs = [topic_prob for _, topic_prob in doc_topics]
            # Everything should be equal in length
            if len(probs_docs) != self.num_topics:
                probs_docs.extend([1e-12] * (self.num_topics - len(probs_docs)))
            X[k] = probs_docs
        return np.reshape(np.array(X), (len(docs), self.num_topics))
def partial_fit(self, X):
"""
Train model over X.
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(X)
if self.gensim_model is None:
self.gensim_model = models.LsiModel(num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
decay=self.decay, onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples)
self.gensim_model.add_documents(corpus=X)
return self
| lgpl-2.1 |
pravsripad/mne-python | tutorials/stats-sensor-space/plot_stats_spatio_temporal_cluster_sensors.py | 18 | 7414 | """
=====================================================
Spatiotemporal permutation F-test on full sensor data
=====================================================
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Spatiotemporal clusters will then
be visualized using custom matplotlib code.
See the `FieldTrip website`_ for a caveat regarding
the possible interpretation of "significant" clusters.
"""
# Authors: Denis Engemann <[email protected]>
# Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import find_ch_adjacency
from mne.viz import plot_compare_evokeds
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 30, fir_design='firwin')
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for the channel of interest
# ---------------------------------------
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id)
X = [epochs[k].get_data() for k in event_id] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
###############################################################################
# Find the FieldTrip neighbor definition to setup sensor adjacency
# ----------------------------------------------------------------
adjacency, ch_names = find_ch_adjacency(epochs.info, ch_type='mag')
print(type(adjacency)) # it's a sparse matrix!
plt.imshow(adjacency.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
###############################################################################
# Compute permutation statistic
# -----------------------------
#
# How does it work? We use clustering to "bind" together features which are
# similar. Our features are the magnetic fields measured over our sensor
# array at different times. This reduces the multiple comparison problem.
# To compute the actual test-statistic, we first sum all F-values in all
# clusters. We end up with one statistic for each cluster.
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=1, buffer_size=None,
adjacency=adjacency)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
###############################################################################
# Note. The same functions work with source estimate. The only differences
# are the origin of the data, the size, and the adjacency definition.
# It can be used for single trials or for groups of subjects.
#
# Visualize clusters
# ------------------
# configure variables for visualization
colors = {"Aud": "crimson", "Vis": 'steelblue'}
linestyles = {"L": '-', "R": '--'}
# organize data for plotting
evokeds = {cond: epochs[cond].average() for cond in event_id}
# loop over clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at the sensors contributing to the cluster
sig_times = epochs.times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
# plot average test statistic and mark significant sensors
f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0)
f_evoked.plot_topomap(times=0, mask=mask, axes=ax_topo, cmap='Reds',
vmin=np.min, vmax=np.max, show=False,
colorbar=False, mask_params=dict(markersize=10))
image = ax_topo.images[0]
# create additional axes (for ERF and colorbar)
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel(
'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))
if len(ch_inds) > 1:
title += "s (mean)"
plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,
colors=colors, linestyles=linestyles, show=False,
split_legend=True, truncate_yaxis='auto')
# plot temporal cluster extent
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
###############################################################################
# Exercises
# ----------
#
# - What is the smallest p-value you can obtain, given the finite number of
# permutations?
# - use an F distribution to compute the threshold by traditional significance
#   levels. Hint: take a look at :obj:`scipy.stats.f` (a sketch follows below).
#
# .. _fieldtrip website:
# http://www.fieldtriptoolbox.org/faq/
# how_not_to_interpret_results_from_a_cluster-based_permutation_test
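###############################################################################
# A sketch for the second exercise (not part of the original tutorial): a
# parametric cluster-forming threshold can be taken from the F distribution.
# The degrees of freedom below assume a standard one-way F test across the
# four conditions.
from scipy import stats  # noqa: E402
n_conditions = len(event_id)
n_observations = sum(len(x) for x in X)
dfn = n_conditions - 1
dfd = n_observations - n_conditions
f_thresh = stats.f.ppf(1. - p_accept, dfn, dfd)
print('Parametric cluster-forming threshold: %0.2f' % f_thresh)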
| bsd-3-clause |
MehtapIsik/bayesian-itc | scripts/bitc_util.py | 1 | 10460 | #!/usr/bin/python
"""
A module implementing Bayesian analysis of isothermal titration calorimetry (ITC) experiments
Written by John D. Chodera <[email protected]>, Pande lab, Stanford, 2008.
Copyright (c) 2008 Stanford University. All Rights Reserved.
All code in this repository is released under the GNU General Public License.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
NOTES
* Throughout, quantities with associated units employ the pint Quantity class to store quantities
  in reference units. Multiplication or division by the desired units should ALWAYS be used to
store or extract quantities in the desired units.
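For example (illustrative of this convention; the variable name is made up)::
    cell_volume = 1.434 * ureg.milliliter     # store a value with explicit units
    cell_volume.to(ureg.liter).magnitude      # extract the plain number in liters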
"""
import os
from os.path import basename, splitext
import numpy
import logging
from bitc.units import ureg, Quantity
import pymc
from bitc.report import Report, analyze
from bitc.parser import optparser
from bitc.experiments import Injection, ExperimentMicroCal, ExperimentYaml
from bitc.instruments import known_instruments, Instrument
from bitc.models import RescalingStep, known_models
import sys
try:
import seaborn
except ImportError:
pass
def compute_normal_statistics(x_t):
# Compute mean.
x = x_t.mean()
# Compute stddev.
dx = x_t.std()
# Compute 95% confidence interval.
ci = 0.95
N = x_t.size
x_sorted = numpy.sort(x_t)
    low_index = int(round((0.5 - ci / 2.0) * N))
    high_index = int(round((0.5 + ci / 2.0) * N))
    xlow = x_sorted[low_index]
    xhigh = x_sorted[high_index]
return [x, dx, xlow, xhigh]
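# Example usage (hypothetical input; any 1-D numpy array of samples works):
#     mean, std, low, high = compute_normal_statistics(numpy.random.randn(10000))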
validated = optparser()
# Process the arguments
working_directory = validated['--workdir']
if not os.path.exists(working_directory):
os.mkdir(working_directory)
os.chdir(working_directory)
# Set the logfile
if validated['--log']:
logfile = '%(--log)s' % validated
else:
logfile = None
# Level of verbosity in log
if validated['-v'] == 3:
loglevel = logging.DEBUG
elif validated['-v'] == 2:
loglevel = logging.INFO
elif validated['-v'] == 1:
loglevel = logging.WARNING
else:
loglevel = logging.ERROR
# Set up the logger
logging.basicConfig(format='%(levelname)s::%(module)s:L%(lineno)s\n%(message)s', level=loglevel, filename=logfile)
# Files for processing
filenames = validated['<datafiles>'] # .itc file to process
file_basenames, file_extensions = zip(*[splitext(basename(filename)) for filename in filenames])
# todo fix names
if not validated['--name']:
# Name of the experiment, and output files
experiment_name = file_basenames[0]
else:
experiment_name = validated['--name']
# If this is a file, it will attempt to read it like an origin file and override heats in experiment.
# todo fix this, wont work for multiple files
integrated_heats_file = validated['--heats'] # file with integrated heats
if validated['mcmc']:
# MCMC settings
nfit = validated['--nfit'] # number of iterations for maximum a posteriori fit
niters = validated['--niters'] # number of iterations
nburn = validated['--nburn'] # number of burn-in iterations
nthin = validated['--nthin'] # thinning period
Model = known_models[validated['--model']] # Model type for mcmc
instruments = list()
# todo fix this flag for multiple files
if validated['--instrument']:
# Use an instrument from the brochure
instrument = [known_instruments[validated['--instrument']]()] * len(filenames)
else:
# Read instrument properties from the .itc or yml file
for index, (filename, file_extension) in enumerate(zip(filenames,file_extensions)):
if file_extension in ['.yaml', '.yml']:
import yaml
with open(filename, 'r') as yamlfile:
yamldict = yaml.load(yamlfile)
instrument_name = yamldict['instrument']
if instrument_name in known_instruments.keys():
import bitc.instruments
# Get the instrument class from bitc.instruments and instance it
instruments.append(getattr(bitc.instruments, instrument_name)())
elif file_extension in ['.itc']:
instruments.append(Instrument(itcfile=filename))
else:
raise ValueError("The instrument needs to be specified on the commandline for non-standard files")
logging.debug("Received this input from the user:")
logging.debug(str(validated))
logging.debug("Current state:")
logging.debug(str(locals()))
# Close all figure windows.
import pylab
pylab.close('all')
logging.info("Reading ITC data from %s" % ", ".join(filenames))
# TODO make this a parallel loop?
experiments = list()
for filename, experiment_name, file_extension, instrument in zip(filenames, file_basenames, file_extensions, instruments):
if file_extension in ['.yaml', '.yml']:
logging.info("Experiment interpreted as literature data: %s" % experiment_name)
experiments.append(ExperimentYaml(filename, experiment_name, instrument))
elif file_extension in ['.itc']:
logging.info("Experiment interpreted as raw .itc data: %s" % experiment_name)
experiments.append(ExperimentMicroCal(filename, experiment_name, instrument))
else:
raise ValueError('Unknown file type. Check your file extension')
logging.debug(str(experiments))
# Only need to perform analysis for a .itc file.
for experiment, file_extension in zip(experiments, file_extensions):
if file_extension in ['.itc']:
# TODO work on a markdown version for generating reports. Perhaps use sphinx
analyze(experiment_name, experiment)
# Write Origin-style integrated heats.
for experiment, experiment_name in zip(experiments, file_basenames):
filename = experiment_name + '-integrated.txt'
experiment.write_integrated_heats(filename)
# Override the heats if file specified.
# TODO deal with flag
# if integrated_heats_file:
# experiment.read_integrated_heats(integrated_heats_file)
# MCMC inference
if not validated['mcmc']:
sys.exit(0)
# Construct a Model from Experiment object.
import traceback
if validated['--model'] == 'TwoComponent':
models = list()
try:
for experiment in experiments:
models.append(Model(experiment))
except Exception as e:
logging.error(str(e))
logging.error(traceback.format_exc())
        raise Exception("MCMC model could not be constructed!\n" + str(e))
# First fit the model.
# TODO This should be incorporated in the model. Perhaps as a model.getSampler() method?
for model in models:
logging.info("Fitting model...")
map = pymc.MAP(model)
map.fit(iterlim=nfit)
logging.info(map)
logging.info("Sampling...")
model.mcmc.sample(iter=niters, burn=nburn, thin=nthin, progress_bar=True)
#pymc.Matplot.plot(mcmc)
# Plot individual terms.
if sum(model.experiment.cell_concentration.values()) > Quantity('0.0 molar'):
pymc.Matplot.plot(model.mcmc.trace('P0')[:], '%s-P0' % model.experiment.name)
if sum(model.experiment.syringe_concentration.values()) > Quantity('0.0 molar'):
pymc.Matplot.plot(model.mcmc.trace('Ls')[:], '%s-Ls' % model.experiment.name)
pymc.Matplot.plot(model.mcmc.trace('DeltaG')[:], '%s-DeltaG' % model.experiment.name)
pymc.Matplot.plot(model.mcmc.trace('DeltaH')[:], '%s-DeltaH' % model.experiment.name)
pymc.Matplot.plot(model.mcmc.trace('DeltaH_0')[:], '%s-DeltaH_0' % model.experiment.name)
pymc.Matplot.plot(numpy.exp(model.mcmc.trace('log_sigma')[:]), '%s-sigma' % model.experiment.name)
# TODO: Plot fits to enthalpogram.
#experiment.plot(model=model, filename='%s-enthalpogram.png' % experiment_name) # todo fix this
# Compute confidence intervals in thermodynamic parameters.
outfile = open('%s.confidence-intervals.out' % model.experiment.name, 'a+')
outfile.write('%s\n' % model.experiment.name)
[x, dx, xlow, xhigh] = compute_normal_statistics(model.mcmc.trace('DeltaG')[:] )
outfile.write('DG: %8.2f +- %8.2f kcal/mol [%8.2f, %8.2f] \n' % (x, dx, xlow, xhigh))
[x, dx, xlow, xhigh] = compute_normal_statistics(model.mcmc.trace('DeltaH')[:] )
outfile.write('DH: %8.2f +- %8.2f kcal/mol [%8.2f, %8.2f] \n' % (x, dx, xlow, xhigh))
[x, dx, xlow, xhigh] = compute_normal_statistics(model.mcmc.trace('DeltaH_0')[:] )
outfile.write('DH0: %8.2f +- %8.2f ucal [%8.2f, %8.2f] \n' % (x, dx, xlow, xhigh))
[x, dx, xlow, xhigh] = compute_normal_statistics(model.mcmc.trace('Ls')[:] )
outfile.write('Ls: %8.2f +- %8.2f uM [%8.2f, %8.2f] \n' % (x, dx, xlow, xhigh))
[x, dx, xlow, xhigh] = compute_normal_statistics(model.mcmc.trace('P0')[:] )
outfile.write('P0: %8.2f +- %8.2f uM [%8.2f, %8.2f] \n' % (x, dx, xlow, xhigh))
[x, dx, xlow, xhigh] = compute_normal_statistics(numpy.exp(model.mcmc.trace('log_sigma')[:]) )
outfile.write('sigma: %8.5f +- %8.5f ucal/s^(1/2) [%8.5f, %8.5f] \n' % (x, dx, xlow, xhigh))
outfile.write('\n')
outfile.close()
elif validated['--model'] == 'Competitive':
if not validated['--receptor']:
raise ValueError('Need to specify a receptor for Competitive model')
else:
receptor = validated['--receptor']
try:
for experiment in experiments:
model = Model(experiments, receptor)
except Exception as e:
logging.error(str(e))
logging.error(traceback.format_exc())
        raise Exception("MCMC model could not be constructed!\n" + str(e))
logging.info("Fitting model...")
map = pymc.MAP(model, verbose=10)
map.fit(iterlim=nfit, verbose=10)
logging.info(map)
logging.info("Sampling...")
model.mcmc.sample(iter=niters, burn=nburn, thin=nthin, progress_bar=True)
pymc.Matplot.plot(model.mcmc, "MCMC.png")
pymc.graph.dag(model.mcmc)
| gpl-3.0 |
rhshah/iCallSV | iCallSV/mergeFinalFiles.py | 2 | 15104 | """
mergeFinalFiles
~~~~~~~~~~~~~~~
:Description: Merge VCF, iAnnotateSV tab and targetSeqView tab file into a single tab-delimited file
"""
'''
Created on May 17, 2015
Description: Merge VCF, iAnnotateSV tab and targetSeqView tab file into a single tab-delimited file
@author: Ronak H Shah
::Input::
aId: Sample ID for case that has the structural aberrations
bId: Sample ID for control
vcfFile: Delly filtered and merged VCF file
annoTab: iAnnotateSV tab-delimited file with annotations
confTab: targetSeqView tab-delimited file with probability score
outputDir: Directory to write the output file
outputPrefix: Output File Prefix
::Output::
outputFile: File with following header
"TumorId\tNormalId\tChr1\tPos1\tChr2\tPos2\tSV_Type\tGene1\tGene2\tTranscript1\tTranscript2\tSite1Description\tSite2Description\tFusion\tProbabilityScore\tConfidence\tComments\tConnection_Type\tSV_LENGTH\tMAPQ\tPairEndReadSupport\tSplitReadSupport\tBrkptType\tConsensusSequence\tTumorVariantCount\tTumorSplitVariantCount\tTumorReadCount\tTumorGenotypeQScore\tNormalVariantCount\tNormalSplitVariantCount\tNormalReadCount\tNormalGenotypeQScorerepName-repClass-repFamily:-site1\trepName-repClass-repFamily:-site2\tCC_Chr_Band\tCC_Tumour_Types(Somatic)\tCC_Cancer_Syndrome\tCC_Mutation_Type\tCC_Translocation_Partner\tDGv_Name-DGv_VarType-site1\tDGv_Name-DGv_VarType-site2\n";
'''
import sys
import os
import logging
import vcf
import checkparameters as cp
import re
import coloredlogs
import numpy as np
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()  # so that matplotlib does not complain about a stale file handle
try:
import pandas as pd
except ImportError, e:
    print "mergeFinalFiles: pandas is not installed, please install pandas as it is required by this module."
sys.exit(1)
logger = logging.getLogger('iCallSV.mergeFinalFiles')
coloredlogs.install(level='DEBUG')
def run(aId, bId, vcfFile, annoTab, confTab, outDir, outputPrefix, verbose):
"""
    Merge the Delly filtered VCF, iAnnotateSV tab-delimited and targetSeqView tab-delimited files into a single tab-delimited file
    :param str aId: Sample ID for case that has the structural aberrations
:param str bId: Sample ID for control
:param str vcfFile: Delly filtered and merged VCF file
:param str annoTab: iAnnotateSV tab-delimited file with annotations
:param str confTab: targetSeqView tab-delimited file with probability score
:param str outputDir: Directory to write the output file
:param str outputPrefix: Output File Prefix
:return: str of the tab-delimited file
:rtype: str
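    Example (hypothetical file paths, for illustration only)::
        mergedFile = run(
            "Tumor1", "Normal1", "tumor_vs_normal.vcf", "annotations.txt",
            "confidence.txt", "/tmp/output", "Tumor1_vs_Normal1", True)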
"""
if(verbose):
logger.info(
"iCallSV::MergeFinalFile: Merging Delly Filtered VCF, iAnnotateSV tab and targetSeqView tab file into a single tab-delimited file")
cp.checkFile(vcfFile)
cp.checkFile(annoTab)
# cp.checkFile(confTab)
cp.checkDir(outDir)
outDF = pd.DataFrame(
columns=[
"TumorId",
"NormalId",
"Chr1",
"Pos1",
"Chr2",
"Pos2",
"SV_Type",
"Gene1",
"Gene2",
"Transcript1",
"Transcript2",
"Site1Description",
"Site2Description",
"Fusion",
"ProbabilityScore",
"Confidence",
"Comments",
"Connection_Type",
"SV_LENGTH",
"MAPQ",
"PairEndReadSupport",
"SplitReadSupport",
"BrkptType",
"ConsensusSequence",
"TumorReferenceCount",
"TumorSplitReferenceCount",
"TumorVariantCount",
"TumorSplitVariantCount",
"TumorReadCount",
"TumorGenotypeQScore",
"NormalReferenceCount",
"NormalSplitReferenceCount",
"NormalVariantCount",
"NormalSplitVariantCount",
"NormalReadCount",
"NormalGenotypeQScore",
"Cosmic_Fusion_Counts",
"repName-repClass-repFamily:-site1",
"repName-repClass-repFamily:-site2",
"CC_Chr_Band",
"CC_Tumour_Types(Somatic)",
"CC_Cancer_Syndrome",
"CC_Mutation_Type",
"CC_Translocation_Partner",
"DGv_Name-DGv_VarType-site1",
"DGv_Name-DGv_VarType-site2"])
    annoDF = pd.read_csv(annoTab, sep="\t", header=0, keep_default_na=True)
if(os.path.isfile(confTab)):
        confDF = pd.read_csv(confTab, sep="\t", header=0, keep_default_na=True)
else:
confDF = None
# Read VCF and Traverse through it
vcf_reader = vcf.Reader(open(vcfFile, 'r'))
samples = vcf_reader.samples
pattern = re.compile(aId)
# Get the case and control id
caseIDinVcf = None
controlIDinVcf = None
for sample in samples:
match = re.search(pattern, sample)
if(match):
caseIDinVcf = sample
else:
controlIDinVcf = sample
# traverse through the vcf
count = 0
for record in vcf_reader:
# Define all variables:
(chrom1,
start1,
start2,
chrom2,
filter,
svtype,
brktype,
contype,
conseq) = (None for i in range(9))
(startCT,
endCT,
str1,
str2,
svlengthFromDelly,
mapqFromDelly,
peSupportFromDelly,
srSupportFromDelly,
ciEndNeg,
ciEndPos,
ciPosNeg,
ciPosPos,
caseRC,
caseGQ,
caseDR,
caseDV,
caseRR,
caseRV,
controlGQ,
controlRC,
controlDR,
controlDV,
controlRR,
controlRV) = (0 for i in range(24))
chrom1 = str(record.CHROM)
start1 = record.POS
filter = record.FILTER
if(len(filter) < 1):
filter = None
else:
filter = filter[0]
preciseFlag = record.is_sv_precise
if("END" in record.INFO):
start2 = record.INFO['END']
if("CHR2" in record.INFO):
chrom2 = str(record.INFO['CHR2'])
if("SVTYPE" in record.INFO):
svtype = record.INFO['SVTYPE']
if("SVLEN" in record.INFO):
svlengthFromDelly = np.int(record.INFO['SVLEN'])
else:
if(svtype == "TRA"):
svlengthFromDelly = 0
else:
svlengthFromDelly = np.int(abs(start2 - start1))
if("MAPQ" in record.INFO):
mapqFromDelly = np.int(record.INFO['MAPQ'])
if("PE" in record.INFO):
peSupportFromDelly = np.int(record.INFO['PE'])
if("SR" in record.INFO):
srSupportFromDelly = np.int(record.INFO['SR'])
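        # Delly reports the breakpoint orientation in the CT field as
        # "<startCT>to<endCT>", where 3 and 5 refer to the 3' and 5' ends;
        # below they are mapped to 0/1 strand flags for the downstream annotation.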
if("CT" in record.INFO):
contype = record.INFO['CT']
(startCT, endCT) = contype.split("to")
if((int(startCT) == 3) and (int(endCT) == 3)):
str1 = 0
str2 = 0
elif((int(startCT) == 3) and (int(endCT) == 5)):
str1 = 0
str2 = 1
elif((int(startCT) == 5) and (int(endCT) == 3)):
str1 = 1
str2 = 0
elif((int(startCT) == 5) and (int(endCT) == 5)):
str1 = 1
str2 = 1
else:
if(verbose):
logger.warning(
"mergeFinalFiles: The connection type (CT) given in the vcf file is incorrect.CT: %s",
contype)
if("CONSENSUS" in record.INFO):
conseq = record.INFO['CONSENSUS']
if(record.is_sv_precise):
brktype = "PRECISE"
else:
            brktype = "IMPRECISE"
if("CIEND" in record.INFO):
ciEndNeg, ciEndPos = record.INFO['CIEND']
if(abs(ciEndNeg) < 50):
ciEndNeg = 50
if(abs(ciEndPos) < 50):
                ciEndPos = 50
if("CIPOS" in record.INFO):
ciPosNeg, ciPosPos = record.INFO['CIPOS']
if(abs(ciPosNeg) < 50):
ciPosNeg = 50
if(abs(ciPosPos) < 50):
                ciPosPos = 50
caseCalls = record.genotype(caseIDinVcf)
controlCalls = record.genotype(controlIDinVcf)
if(hasattr(caseCalls.data, "GQ")):
caseGQ = np.int(caseCalls.data.GQ)
if(hasattr(caseCalls.data, "RC")):
caseRC = np.int(caseCalls.data.RC)
if(hasattr(caseCalls.data, "DR")):
caseDR = np.int(caseCalls.data.DR)
if(hasattr(caseCalls.data, "DV")):
caseDV = np.int(caseCalls.data.DV)
if(hasattr(caseCalls.data, "RR")):
caseRR = np.int(caseCalls.data.RR)
if(hasattr(caseCalls.data, "RV")):
caseRV = np.int(caseCalls.data.RV)
if(hasattr(controlCalls.data, "GQ")):
controlGQ = np.int(controlCalls.data.GQ)
if(hasattr(controlCalls.data, "RC")):
controlRC = np.int(controlCalls.data.RC)
if(hasattr(controlCalls.data, "DR")):
controlDR = np.int(controlCalls.data.DR)
if(hasattr(controlCalls.data, "DV")):
controlDV = np.int(controlCalls.data.DV)
if(hasattr(controlCalls.data, "RR")):
controlRR = np.int(controlCalls.data.RR)
if(hasattr(controlCalls.data, "RV")):
controlRV = np.int(controlCalls.data.RV)
# Get data from annotation file
(indexList,
annoIndex,
gene1,
gene2,
transcript1,
transcript2,
site1,
site2,
fusion,
rr_site1,
rr_site2,
cc_chr_band,
cc_t_t,
cc_c_s,
cc_m_t,
cc_t_p,
dgv_site1,
dgv_site2
) = (None for i in range(18))
cosmic_fusion_counts = 0
annoDF[['chr1', 'chr2']] = annoDF[['chr1', 'chr2']].astype(str)
annoDF['Cosmic_Fusion_Counts'].fillna(0, inplace=True)
annoDF[['Cosmic_Fusion_Counts']] = annoDF[['Cosmic_Fusion_Counts']].astype(int)
indexList = annoDF.loc[annoDF['chr1'].isin([chrom1]) &
annoDF['pos1'].isin([int(start1)]) &
annoDF['chr2'].isin([chrom2]) &
annoDF['pos2'].isin([int(start2)]) &
annoDF['str1'].isin([str1]) &
annoDF['str2'].isin([str2])].index.tolist()
if(len(indexList) > 1):
if(verbose):
                logger.fatal(
                    "iCallSV::MergeFinalFile: More than one SV has the same coordinates in the same sample in the annotation file. Please check and rerun")
sys.exit(1)
else:
annoIndex = indexList[0]
gene1 = annoDF.iloc[annoIndex]['gene1']
gene2 = annoDF.iloc[annoIndex]['gene2']
transcript1 = annoDF.iloc[annoIndex]['transcript1']
transcript2 = annoDF.iloc[annoIndex]['transcript2']
site1 = annoDF.iloc[annoIndex]['site1']
site2 = annoDF.iloc[annoIndex]['site2']
fusion = annoDF.iloc[annoIndex]['fusion']
rr_site1 = annoDF.iloc[annoIndex]['repName-repClass-repFamily:-site1']
rr_site2 = annoDF.iloc[annoIndex]['repName-repClass-repFamily:-site2']
cosmic_fusion_counts = int(annoDF.iloc[annoIndex]['Cosmic_Fusion_Counts'])
cc_chr_band = annoDF.iloc[annoIndex]['CC_Chr_Band']
cc_t_t = annoDF.iloc[annoIndex]['CC_Tumour_Types(Somatic)']
cc_c_s = annoDF.iloc[annoIndex]['CC_Cancer_Syndrome']
cc_m_t = annoDF.iloc[annoIndex]['CC_Mutation_Type']
cc_t_p = annoDF.iloc[annoIndex]['CC_Translocation_Partner']
dgv_site1 = annoDF.iloc[annoIndex]['DGv_Name-DGv_VarType-site1']
dgv_site2 = annoDF.iloc[annoIndex]['DGv_Name-DGv_VarType-site2']
if(confDF is None):
confidenceScore = None
else:
# Get information for confidence score
confIndex = None
confidenceScore = None
confDF[['Chr1', 'Chr2']] = confDF[['Chr1', 'Chr2']].astype(str)
indexList = confDF.loc[
confDF['Chr1'].isin([chrom1]) & confDF['Start1'].isin(
[int(start1 - abs(ciPosNeg))]) & confDF['Chr2'].isin([chrom2]) &
confDF['Start2'].isin([int(start2 - abs(ciEndNeg))])].index.tolist()
if(len(indexList) > 1):
if(verbose):
                logger.fatal(
                    "iCallSV::MergeFinalFile: More than one SV has the same coordinates in the same sample in the confidence score file. Please check and rerun")
sys.exit(1)
else:
confIndex = indexList[0]
confidenceScore = np.float(confDF.iloc[confIndex]['ProbabilityScore'])
# populate final dataframe
outDF.loc[count,
["TumorId", "NormalId", "Chr1", "Pos1", "Chr2", "Pos2", "SV_Type", "Gene1",
"Gene2", "Transcript1", "Transcript2", "Site1Description", "Site2Description",
"Fusion", "ProbabilityScore", "Confidence", "Comments", "Connection_Type",
"SV_LENGTH", "MAPQ", "PairEndReadSupport", "SplitReadSupport", "BrkptType",
"ConsensusSequence", "TumorReferenceCount", "TumorSplitReferenceCount",
"TumorVariantCount", "TumorSplitVariantCount", "TumorReadCount",
"TumorGenotypeQScore", "NormalReferenceCount", "NormalSplitReferenceCount",
"NormalVariantCount", "NormalSplitVariantCount", "NormalReadCount",
"NormalGenotypeQScore", "Cosmic_Fusion_Counts", "repName-repClass-repFamily:-site1",
"repName-repClass-repFamily:-site2", "CC_Chr_Band", "CC_Tumour_Types(Somatic)",
"CC_Cancer_Syndrome", "CC_Mutation_Type", "CC_Translocation_Partner",
"DGv_Name-DGv_VarType-site1", "DGv_Name-DGv_VarType-site2"]] = [aId, bId, chrom1,
start1, chrom2, start2, svtype, gene1, gene2, transcript1, transcript2, site1, site2,
fusion, confidenceScore, None, None, contype, svlengthFromDelly, mapqFromDelly,
peSupportFromDelly, srSupportFromDelly, brktype, conseq, caseDR, caseRR, caseDV, caseRV,
caseRC, caseGQ, controlDR, controlRR, controlDV, controlRV, controlRC, controlGQ, cosmic_fusion_counts,
rr_site1, rr_site2, cc_chr_band, cc_t_t, cc_c_s, cc_m_t, cc_t_p, dgv_site1, dgv_site2]
count = count + 1
# Write Output
outFile = outDir + "/" + outputPrefix + "_merged.txt"
outDF.to_csv(outFile, sep='\t', index=False)
if(verbose):
logger.info("iCallSV::MergeFinalFile: Finished merging, Final data written in %s", outFile)
return(outFile)
| apache-2.0 |
zuku1985/scikit-learn | benchmarks/bench_plot_omp_lars.py | 72 | 4514 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import six
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
            n_informative = n_features // 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import matplotlib.pyplot as plt
fig = plt.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(six.iteritems(results))):
ax = fig.add_subplot(1, 2, i+1)
vmax = max(1 - timings.min(), -1 + timings.max())
plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + [str(each) for each in samples_range])
ax.set_yticklabels([''] + [str(each) for each in features_range])
plt.xlabel('n_samples')
plt.ylabel('n_features')
plt.title(label)
plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = plt.axes([0.1, 0.08, 0.8, 0.06])
plt.colorbar(cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
amitkaps/full-stack-data-science | server-setup/files/etc/jupyter_notebook_config.py | 1 | 22522 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions.Entry values can
# be used to enable and disable the loading ofthe extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
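## Example (illustrative only; generate your own hash with the snippet above):
#c.NotebookApp.password = 'sha1:<salt>:<hashed-password>'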
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine though ssh.
#
# In such a case, server the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
c.NotebookApp.token = 'fullstack'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
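## Example (illustrative; assumes the msgpack package is installed): use msgpack
# serialization and sign messages with a shared key.
#
#c.Session.packer = 'msgpack.packb'
#c.Session.unpacker = 'msgpack.unpackb'
#c.Session.key = b'shared-secret-key' # placeholder value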
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'ds'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
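## Example pre_save_hook (a sketch, not shipped with the notebook server): strip
# code-cell outputs before the notebook is written to disk.
#
# def scrub_output_pre_save(path, model, contents_manager, **kwargs):
#     """Clear output cells of notebooks before saving."""
#     if model['type'] == 'notebook' and model['content'] is not None:
#         for cell in model['content'].get('cells', []):
#             if cell.get('cell_type') == 'code':
#                 cell['outputs'] = []
#                 cell['execution_count'] = None
#
#c.ContentsManager.pre_save_hook = scrub_output_pre_save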
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note
# ----
# Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, known as
# 'atomic writing', causes some bugs on file systems without operation-order
# enforcement (like some networked file systems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
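## Example post_save_hook (a sketch; assumes nbconvert is installed): export a
# .py script next to every saved notebook.
#
# import os
#
# def script_post_save(model, os_path, contents_manager, **kwargs):
#     """Convert a just-saved notebook to a script via nbconvert."""
#     if model['type'] != 'notebook':
#         return
#     from nbconvert.exporters.script import ScriptExporter
#     script, resources = ScriptExporter().from_filename(os_path)
#     base, _ = os.path.splitext(os_path)
#     with open(base + resources['output_extension'], 'w') as f:
#         f.write(script)
#
#c.FileContentsManager.post_save_hook = script_post_save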
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| mit |
annahs/atmos_research | NC_coated_distrs_for_each_core_size.py | 1 | 12060 | import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib import dates
from mpl_toolkits.basemap import Basemap
import calendar
from scipy.optimize import curve_fit
flight = 'science 10'
flight_times = {
#'science 1' : [datetime(2015,4,5,9,0), datetime(2015,4,5,14,0) ,''],
#'ferry 1' : [datetime(2015,4,6,9,0), datetime(2015,4,6,11,0) ,'UHSAS_Polar6_20150406_R0_V1.ict'],
#'ferry 2' : [datetime(2015,4,6,15,0), datetime(2015,4,6,18,0) ,'UHSAS_Polar6_20150406_R0_V2.ict'],
#'science 2' : [datetime(2015,4,7,16,0), datetime(2015,4,7,21,0) ,'UHSAS_Polar6_20150407_R0_V1.ict'],
#'science 3' : [datetime(2015,4,8,13,0), datetime(2015,4,8,17,0) ,'UHSAS_Polar6_20150408_R0_V1.ict'],
#'science 4' : [datetime(2015,4,8,17,30),datetime(2015,4,8,22,0) ,'UHSAS_Polar6_20150408_R0_V2.ict'],
#'science 5' : [datetime(2015,4,9,13,30),datetime(2015,4,9,18,0) ,'UHSAS_Polar6_20150409_R0_V1.ict'],
#'ferry 3' : [datetime(2015,4,10,14,0),datetime(2015,4,10,17,0),'UHSAS_Polar6_20150410_R0_V1.ict'],
#'science 6' : [datetime(2015,4,11,15,0),datetime(2015,4,11,22,0),'UHSAS_Polar6_20150411_R0_V1.ict'],
#'science 7' : [datetime(2015,4,13,15,0),datetime(2015,4,13,21,0),'UHSAS_Polar6_20150413_R0_V1.ict'],
#'science 8' : [datetime(2015,4,20,15,0),datetime(2015,4,20,20,0),'UHSAS_Polar6_20150420_R0_V1.ict'],
#'science 9' : [datetime(2015,4,20,21,0),datetime(2015,4,21,2,0) ,'UHSAS_Polar6_20150420_R0_V2.ict'],
'science 10' : [datetime(2015,4,21,16,8),datetime(2015,4,21,16,18),'UHSAS_Polar6_20150421_R0_V1.ict'], ###
}
start_time = flight_times[flight][0]
end_time = flight_times[flight][1]
UNIX_start_time = calendar.timegm(start_time.utctimetuple())
UNIX_end_time = calendar.timegm(end_time.utctimetuple())
print start_time, UNIX_start_time
print end_time, UNIX_end_time
incand_calib_intercept = 0.19238 #alert = 0.19238
incand_calib_slope = 0.00310 #alert = 0.00310
R = 8.3144621 # in m3*Pa/(K*mol)
sample_flow_lower_limit = 100
min_BC_VED = 70
max_BC_VED = 220
min_rBC_mass = ((min_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
max_rBC_mass = ((max_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15)
incand_min = (min_rBC_mass-incand_calib_intercept)/incand_calib_slope
incand_max = (max_rBC_mass-incand_calib_intercept)/incand_calib_slope
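#note: rBC mass (fg) follows from VED (nm) via m = (pi/6)*d^3*rho with rho = 1.8 g/cm^3
#(the 10^7 and 10^15 factors convert nm to cm and g to fg), and the incandescence
#amplitude limits invert the linear calibration mass = slope*amplitude + intercept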
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
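#add_data is the parameterized INSERT used below to store binned results; the table
#name here is a placeholder assumption, not taken from the original script
add_data = ("INSERT INTO polar6_binned_UHSAS_SP2_2015 "
"(UNIX_UTC_ts, binLL, binUL, property, prop_value) "
"VALUES (%(UNIX_UTC_ts)s, %(binLL)s, %(binUL)s, %(property)s, %(prop_value)s)")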
for UHSAS_file in ['UHSAS_Polar6_20150410_R0_V1.ict','UHSAS_Polar6_20150411_R0_V1.ict','UHSAS_Polar6_20150413_R0_V1.ict','UHSAS_Polar6_20150420_R0_V1.ict','UHSAS_Polar6_20150420_R0_V2.ict']:
with open(UHSAS_file, 'r') as f:
print UHSAS_file
file_date = UHSAS_file[13:21]
date = datetime.strptime(file_date, '%Y%m%d')
##get bin limits
i=0
while i < 9: #indep_var_number is always on line 10
f.readline()
i+=1
indep_var_number = float(f.readline())
i=0
while i < (indep_var_number + 11): #check that 11 is right for each set of files
f.readline()
i+=1
bin_LL_line = (f.readline()).split()
f.readline() #skip this line
bin_UL_line = (f.readline()).split()
##create bins dict
bin_dict = {}
i=0
for LL_limit in bin_LL_line:
bin_dict[i] = [float(LL_limit),float(bin_UL_line[i])]
i+=1
#start analysis
data_start = False
printcounter = 0 #counter for the periodic progress reports printed below
for line in f:
no_prev_particle = False
if line.startswith('time,flow,pressure,total_number_conc'):
data_start = True
continue
if data_start == True:
newline = line.split()
time_stamp = date + timedelta(seconds = float(newline[0].rstrip(',')))
UNIX_time_stamp = calendar.timegm(time_stamp.utctimetuple())
time_min = UNIX_time_stamp - 1
time_max = UNIX_time_stamp
#print progress reports
if printcounter == 100:
print time_stamp
printcounter = 0
printcounter += 1
####
#get the sample flow from the hk data to calc sampled volume
cursor.execute(('SELECT sample_flow from polar6_hk_data_2015 where UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s'),(time_min,time_max))
hk_data = cursor.fetchall()
#if no hk data collected we can't get a number conc, but we can still get a mean coating and core size, so continue, but sample flow is nan
if hk_data == []:
sample_flow = np.nan
else:
sample_flow = hk_data[0][0] #in vccm
#drops in the sample flow are an issue so don't calc a conc for these periods
if sample_flow <= sample_flow_lower_limit:
sample_flow = np.nan
#get the timestamp from the last valid particle in the interval
cursor.execute(('SELECT UNIX_UTC_ts FROM polar6_coating_2015 WHERE UNIX_UTC_ts < %s AND particle_type = %s and instrument = %s and incand_amp >=%s and incand_amp <=%s order by UNIX_UTC_ts desc limit 1'),(time_max, 'incand', 'UBCSP2',incand_min,incand_max))
last_particle_data = cursor.fetchall()
last_particle_ts = last_particle_data[0][0]
#get timestamp from last valid particle before this interval so we can caluculate the volume sampled
cursor.execute(('SELECT UNIX_UTC_ts FROM polar6_coating_2015 WHERE UNIX_UTC_ts < %s AND particle_type = %s and instrument = %s and incand_amp >=%s and incand_amp <=%s order by UNIX_UTC_ts desc limit 1'),(time_min, 'incand', 'UBCSP2',incand_min,incand_max))
prev_particle_data = cursor.fetchall()
#take care of the edge-case where we're looking at the first particle of the run, in this case we'll ignore the first particle in the interval since we don't know when we started waiting for it to be detected
if prev_particle_data == []:
#in this case get the timestamp from the first valid particle in the interval
cursor.execute(('SELECT UNIX_UTC_ts FROM polar6_coating_2015 WHERE UNIX_UTC_ts >= %s AND particle_type = %s and instrument = %s and incand_amp >=%s and incand_amp <=%s order by UNIX_UTC_ts limit 1'),(time_min, 'incand', 'UBCSP2',incand_min,incand_max))
substitute_prev_particle_data = cursor.fetchall()
prev_particle_ts = substitute_prev_particle_data[0][0]
no_prev_particle = True
else:
prev_particle_ts = prev_particle_data[0][0]
#calc total interval sampling time and sampled volume
interval_sampling_time = last_particle_ts - prev_particle_ts
if interval_sampling_time <= 0:
#print 'interval_sampling_time bad', interval_sampling_time
interval_sampled_volume = np.nan
else:
interval_sampled_volume = sample_flow*interval_sampling_time/60 #factor of 60 to convert minutes to secs, result is in cc
#get T and P for correction to STP/SCCM
cursor.execute(('SELECT temperature_C,BP_Pa from polar6_flight_track_details where UNIX_UTC_ts > %s and UNIX_UTC_ts <= %s'),(time_min,time_max))
TandP_data = cursor.fetchall()
#now get the particle data per bin
for bin_number in range(0,len(bin_dict)):
bin_LL = bin_dict[bin_number][0]
bin_UL = bin_dict[bin_number][1]
##### SP2 data
#get core + coating count
cursor.execute(('SELECT count(*) from polar6_coating_2015 where UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s and particle_type = %s and instrument = %s and (POW(rBC_mass_fg,(1/3.0))*101.994391398+2*coat_thickness_nm) >=%s and (POW(rBC_mass_fg,(1/3.0))*101.994391398+2*coat_thickness_nm) <=%s'),
(time_min,time_max, 'incand', 'UBCSP2',bin_LL,bin_UL))
core_plus_coating_count = cursor.fetchall()[0][0]
#get core only data
cursor.execute(('SELECT rBC_mass_fg, coat_thickness_nm from polar6_coating_2015 where UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s and particle_type = %s and instrument = %s and (POW(rBC_mass_fg,(1/3.0))*101.994391398) >=%s and (POW(rBC_mass_fg,(1/3.0))*101.994391398) <=%s'),
(time_min,time_max, 'incand', 'UBCSP2',bin_LL,bin_UL))
core_only_data = cursor.fetchall()
#### UHSAS data
UHSAS_norm_number = float(newline[bin_number+4].rstrip(',')) #this is dN/dLogD per sccm
#check that we have both valid UHSAS and SP2 data, we can only make a meaningful UHSAS/SP2 conc ratio if we have valid measurements for both
if UHSAS_norm_number < 0: #-9999 is missing data and any other negative is a data problem
#print UNIX_time_stamp, 'no UHSAS data'
UHSAS_number = None
core_plus_coating_number_conc = None
core_only_number_conc = None
elif TandP_data == []:
#print UNIX_time_stamp, 'no SP2 data: T and P missing'
UHSAS_number = None
core_plus_coating_number_conc = None
core_only_number_conc = None
elif np.isnan(interval_sampled_volume) == True:
#print UNIX_time_stamp, 'no SP2 data: no, or bad, sample flow data'
UHSAS_number = None
core_plus_coating_number_conc = None
core_only_number_conc = None
else:
temperature = TandP_data[0][0] + 273.15 #convert to Kelvin
pressure = TandP_data[0][1]
correction_factor_for_STP = (101325/pressure)*(temperature/273.15)
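#(scales the sampled volume to standard conditions, 273.15 K and 101325 Pa, so the resulting concentrations are per sccm)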
UHSAS_number = UHSAS_norm_number*(math.log(bin_UL)-math.log(bin_LL)) #this is dN per sccm
core_plus_coating_number_conc = core_plus_coating_count*correction_factor_for_STP/interval_sampled_volume #dN/sccm
core_only_number_conc = len(core_only_data)*correction_factor_for_STP/interval_sampled_volume
if no_prev_particle == True: #in this case need to ignore first particle (but don't want negative if the count is zero)
if len(core_only_data) > 0:
core_only_number_conc = (len(core_only_data)-1)*correction_factor_for_STP/interval_sampled_volume
else:
core_only_number_conc = 0
if core_plus_coating_count > 0:
core_plus_coating_number_conc = (core_plus_coating_count-1)*correction_factor_for_STP/interval_sampled_volume #dN/sccm
else:
core_plus_coating_number_conc = 0
#calcualte and write mean core and coating sizes (need a core and a coating value)
new_list = []
for row in core_only_data:
mass = row[0]
coat = row[1]
if mass != None and coat != None:
new_list.append([mass, coat])
if new_list != []:
mean_rBC_mass = np.mean([row[0] for row in new_list])
mean_core_dia = (((mean_rBC_mass/(10**15*1.8))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
mean_coating = np.mean([row[1] for row in new_list])
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'mean_core_dia',
'prop_value': float(mean_core_dia),
}
cursor.execute(add_data, binned_data)
cnx.commit()
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'mean_coating_th',
'prop_value': float(mean_coating),
}
cursor.execute(add_data, binned_data)
cnx.commit()
#write number concs if we have the available data
if UHSAS_number != None and core_plus_coating_number_conc != None and core_only_number_conc != None:
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'UHSAS_#',
'prop_value': UHSAS_number,
}
cursor.execute(add_data, binned_data)
cnx.commit()
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'SP2_coated_#',
'prop_value': core_plus_coating_number_conc,
}
cursor.execute(add_data, binned_data)
cnx.commit()
binned_data = {
'UNIX_UTC_ts': UNIX_time_stamp,
'binLL': bin_LL,
'binUL': bin_UL,
'property': 'SP2_core_#',
'prop_value': core_only_number_conc,
}
cursor.execute(add_data, binned_data)
cnx.commit()
cnx.close()
| mit |
bsipocz/statsmodels | statsmodels/sandbox/examples/try_smoothers.py | 39 | 2655 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 01 15:17:52 2011
Author: Mike
Author: Josef
mainly script for checking Kernel Regression
"""
import numpy as np
if __name__ == "__main__":
#from statsmodels.sandbox.nonparametric import smoothers as s
from statsmodels.sandbox.nonparametric import smoothers, kernels
import matplotlib.pyplot as plt
#from numpy import sin, array, random
import time
np.random.seed(500)
nobs = 250
sig_fac = 0.5
#x = np.random.normal(size=nobs)
x = np.random.uniform(-2, 2, size=nobs)
#y = np.array([np.sin(i*5)/i + 2*i + (3+i)*np.random.normal() for i in x])
y = np.sin(x*5)/x + 2*x + sig_fac * (3+x)*np.random.normal(size=nobs)
K = kernels.Biweight(0.25)
K2 = kernels.CustomKernel(lambda x: (1 - x*x)**2, 0.25, domain = [-1.0,
1.0])
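# K2 re-implements the biweight shape (1 - x^2)^2 on [-1, 1] via CustomKernel,
# so its norm constants and fitted values are expected to match K below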
KS = smoothers.KernelSmoother(x, y, K)
KS2 = smoothers.KernelSmoother(x, y, K2)
KSx = np.arange(-3, 3, 0.1)
start = time.time()
KSy = KS.conf(KSx)
KVar = KS.std(KSx)
print(time.time() - start) # This should be significantly quicker...
start = time.time() #
KS2y = KS2.conf(KSx) #
K2Var = KS2.std(KSx) #
print(time.time() - start) # ...than this.
KSConfIntx, KSConfInty = KS.conf(15)
print("Norm const should be 0.9375")
print(K2.norm_const)
print("L2 Norms Should Match:")
print(K.L2Norm)
print(K2.L2Norm)
print("Fit values should match:")
#print zip(KSy, KS2y)
print(KSy[28])
print(KS2y[28])
print("Var values should match:")
#print zip(KVar, K2Var)
print(KVar[39])
print(K2Var[39])
fig = plt.figure()
ax = fig.add_subplot(221)
ax.plot(x, y, "+")
ax.plot(KSx, KSy, "-o")
#ax.set_ylim(-20, 30)
ax2 = fig.add_subplot(222)
ax2.plot(KSx, KVar, "-o")
ax3 = fig.add_subplot(223)
ax3.plot(x, y, "+")
ax3.plot(KSx, KS2y, "-o")
#ax3.set_ylim(-20, 30)
ax4 = fig.add_subplot(224)
ax4.plot(KSx, K2Var, "-o")
fig2 = plt.figure()
ax5 = fig2.add_subplot(111)
ax5.plot(x, y, "+")
ax5.plot(KSConfIntx, KSConfInty, "-o")
import statsmodels.nonparametric.smoothers_lowess as lo
ys = lo.lowess(y, x)
ax5.plot(ys[:,0], ys[:,1], 'b-')
ys2 = lo.lowess(y, x, frac=0.25)
ax5.plot(ys2[:,0], ys2[:,1], 'b--', lw=2)
#need to sort for matplotlib plot ?
xind = np.argsort(x)
pmod = smoothers.PolySmoother(5, x[xind])
pmod.fit(y[xind])
yp = pmod(x[xind])
ax5.plot(x[xind], yp, 'k-')
ax5.set_title('Kernel regression, lowess - blue, polysmooth - black')
#plt.show()
| bsd-3-clause |
HaythemSahbani/Web-mining-university-project | src/hashtag_classification.py | 1 | 4022 | from twitter_crawl import TwitterCrawl
class HtagClassifier:
def __init__(self):
pass
@staticmethod
def htag_classifier(list_json_frame):
"""
:param list_json_frame: list of tweet dicts, each with "text" and "date" keys
:return: dict mapping each hashtag to a list of (text, date) tuples;
tweets without hashtags are collected under "no_htags_tweet"
"""
topic_dict = dict([])
topic_dict["no_htags_tweet"] = []
for json_frame in list_json_frame:
no_htag_test = True
for word in json_frame["text"].split(" "):
if word.startswith("#"):
no_htag_test = False
try:
topic_dict[word].append((json_frame["text"], json_frame["date"]))
except:
topic_dict[word] = [(json_frame["text"], json_frame["date"])]
if no_htag_test:
topic_dict["no_htags_tweet"].append((json_frame["text"], json_frame["date"]))
return topic_dict
@staticmethod
def get_most_frequent_htag(dic):
lst = [word for word, frequency in sorted(dic.items(), key=lambda t: len(t[1]), reverse=True)]
print("Most frequent tweets:", lst[1:6])
return lst
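# Illustrative usage (the sample tweet below is made up; the methods only assume a
# list of dicts with "text" and "date" keys):
# tweets = [{"text": "#election night coverage", "date": "Tue Nov 08 20:00:00 +0000 2016"}]
# by_htag = HtagClassifier.htag_classifier(tweets)
# top_htags = HtagClassifier.get_most_frequent_htag(by_htag)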
import matplotlib.pyplot as plt
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter, MONDAY
from datetime import datetime, timedelta
from calendar import monthrange
import numpy
def monthdelta(d1, d2):
delta = 0
while True:
mdays = monthrange(d1.year, d1.month)[1]
d1 += timedelta(days=mdays)
if d1 <= d2:
delta += 1
else:
break
return delta
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
import matplotlib
def evolution_all_month(time, nb_month, nb_tweets_month):
# every monday
mondays = WeekdayLocator(MONDAY)
# every 3rd month
months = MonthLocator(range(1, 13), bymonthday=1, interval=1)
monthsFmt = DateFormatter("%b '%y")
k = len(time)
fig, ax = plt.subplots(nrows=k, sharex=True)
dates = []
for ind in range(k):
dates.append([])
ndays = 0
first_day = time[ind][-1]
for dt in range(nb_month[ind]+1):
month = first_day.month+dt
if month % 13 == 0:
ndays += monthrange(first_day.year+month/12, 1)[1]
else:
ndays += monthrange(first_day.year+month/12, month % 13)[1]
dates[ind].append(timedelta(days=ndays)+first_day)
ax[ind].bar(dates[ind], nb_tweets_month[ind], width=20, color=numpy.random.rand(3, 1))
ax[ind].xaxis_date()
ax[ind].xaxis.set_major_locator(months)
ax[ind].xaxis.set_major_formatter(monthsFmt)
ax[ind].xaxis.set_minor_locator(mondays)
ax[ind].set_title('evolution of Htag #'+str(ind+1))
ax[ind].autoscale_view()
#ax.xaxis.grid(False, 'major')
#ax.xaxis.grid(True, 'minor')
ax[ind].grid(True)
fig.autofmt_xdate()
fig.subplots_adjust(hspace=0.5)
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
plt.show()
def plot(dic):
dic = HtagClassifier().htag_classifier(dic)
s = HtagClassifier().get_most_frequent_htag(dic)[1:6]
time = []
k = len(s)
for i in range(k):
time.append([])
for htag in s:
for j in range(len(dic[htag])):
term = dic[htag][j][1]
date = term[:19] + term[25:]
time[s.index(htag)].append(datetime.strptime(date, "%a %b %d %H:%M:%S %Y"))
nb_tweets_month = []
duration = []
nb_month = []
for ind in range(k):
duration.append(time[ind][0]-time[ind][-1])
nb_month.append(monthdelta(time[ind][-1], time[ind][0]))
nb_tweets_month.append([])
for k in range(nb_month[ind]+1):
nb_tweets_month[ind].append(0)
for j in xrange(len(time[ind])-1, -1, -1):
nb_tweets_month[ind][monthdelta(time[ind][-1], time[ind][j])] += 1
evolution_all_month(time, nb_month, nb_tweets_month)
| mit |
yabata/prodyn | examples/building_with_storage/building_with_storage_model.py | 1 | 6671 | import pandas as pd
import numpy as np
import pyrenn as prn
import pdb
def read_data(file):
"""Read data about the system from the excel file and assign
it to different parameters
Args:
file: excel file, which stores all data about considered system
Returns:
cst: constants, which describe the system
srs: parameters, which are variable with time
U: list of possible decisions
states: values, which set number of states and characterize
all possible ones
"""
xls = pd.ExcelFile(file)
states = xls.parse('DP-States',index_col=[0])
cst = xls.parse('Constants',index_col=[0])['Value']
srs = xls.parse('Time-Series',index_col=[0])
U = xls.parse('DP-Decisions',index_col=[0])['Decisions'].values
return cst,srs,U,states
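#illustrative call (the file name is an assumption, not part of this module):
#cst, srs, U, states = read_data('building_with_storage_input.xlsx')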
###################################
#for 2 states - temperature and heat-storage
###################################
def building_with_storage(u,x,t,cst,Srs,Data):
"""For current timestep t and for current decision u transition from
actual timestep i to the following timestep j is simulated for all
possible states, which are stored in x.
Costs of this transition and array of states after it are calculated.
Args:
u: decision from list of possible ones
x: array, where all possible system states are stored
t: actual timestep i
cst: constants needed for calculation
srs: values of needed timeseries
Returns:
cost: costs at timestep i
x_j: array with states at timestep j after transition due to
decision u
data: dataframe, which keeps additional information about
transition from i to j
"""
###############################################
#Defining T_room and P_th for timestep j
#with using pre-trained NN
###############################################
l = len(x[0])
delay=4
net = cst['net']
#create 5 inputs for input array P
hour = Srs.loc[t]['hour']
solar = Srs.loc[t]['solar']
T_amb = Srs.loc[t]['T_amb']
user = Srs.loc[t]['use_room']
T_inlet = Srs.loc[t]['T_inlet']
#create 6th input depending on the current decision
if u=='pump on/storage on' or u=='pump off/storage on':
massflow = cst['massflow']
elif u=='pump off/storage off' or u=='pump on/storage off':
massflow = 0
#defining input array P for NN
P = np.array([[hour],[solar],[T_amb],[user],[massflow],[T_inlet]],dtype = np.float)
#prepare 5 inputs for P0
hour0 = Srs.loc[t-delay:t-1]['hour'].values.copy()
solar0 = Srs.loc[t-delay:t-1]['solar'].values.copy()
T_amb0 = Srs.loc[t-delay:t-1]['T_amb'].values.copy()
user0 = Srs.loc[t-delay:t-1]['use_room'].values.copy()
T_inlet0 = Srs.loc[t-delay:t-1]['T_inlet'].values.copy()
#defining initial values, which are used inside the loop
T_roomj = np.zeros(l)
P_th = np.zeros(l)
#defining initial values, which are used outside the loop
E_j = np.zeros(l)
P_el = np.zeros(l)
costx = np.zeros(l)
#loop for every possible temperature state
for i,x1 in enumerate(x[0]):
#prepare 6th input for P0 and 2 outputs for Y0
if t-delay<cst['t_start']:
#take all values for P0 and Y0 from timeseries
if Data is None or t==cst['t_start']:
T_room0 = np.ones(delay) * x1
P_th0 = Srs.loc[t-delay:t-1]['P_th'].values.copy()
massflow0 = Srs.loc[t-delay:t-1]['massflow'].values.copy()
#take part of values from timeseries and part from big Data
else:
tx = t-cst['t_start']
T_room0 = np.concatenate([Srs.loc[t-delay:t-tx-1]['T_room'].values.copy(),Data.loc[t-tx-1:t-1].xs(i,level='Xidx_end')['T_room'].values.copy()])
P_th0 = np.concatenate([Srs.loc[t-delay:t-tx-1]['P_th'].values.copy(),Data.loc[t-tx-1:t-1].xs(i,level='Xidx_end')['P_th'].values.copy()])
massflow0 = np.concatenate([Srs.loc[t-delay:t-tx-1]['massflow'].values.copy(),Data.loc[t-tx-1:t-1].xs(i,level='Xidx_end')['massflow'].values.copy()])
#take all values for P0 and Y0 from big Data
else:
T_room0 =Data.loc[t-delay:t-1].xs(i,level='Xidx_end')['T_room'].values.copy()
P_th0 = Data.loc[t-delay:t-1].xs(i,level='Xidx_end')['P_th'].values.copy()
massflow0 = Data.loc[t-delay:t-1].xs(i,level='Xidx_end')['massflow'].values.copy()
#Create P0 and Y0
P0 = np.array([hour0,solar0,T_amb0,user0,massflow0,T_inlet0],dtype = np.float)
Y0 = np.array([T_room0,P_th0],dtype = np.float)
#run NN for one timestep
if np.any(P0!=P0) or np.any(Y0!=Y0) or np.any(Y0>1000):
#if P0 or Y0 not valid use valid values and apply penalty costs
costx[i] = 1000*10
T_roomj[i] = x1
P_th[i] = 0
else:
T_roomj[i],P_th[i] = prn.NNOut(P,net,P0=P0,Y0=Y0)
if T_roomj[i] != T_roomj[i] or P_th[i] != P_th[i]:
pdb.set_trace()
#calculating heat-storage state depending on the chosen decision
P_hp = 2
if u=='pump on/storage on':
E_j=x[1]
P_el = 3*P_th
elif u=='pump on/storage off':
E_j=x[1]+P_hp*3*0.25
P_el = P_hp
elif u=='pump off/storage on':
E_j=x[1]-P_th*0.25
P_el = 0
elif u=='pump off/storage off':
E_j=x[1]
P_el = 0
costx = costx + 99999*(E_j<x[1][0]) + 99999*(E_j>x[1][-1])
###############################################
#Building array x_j for the timestep j and
#calculating all costs for transition from i to j
###############################################
#building x_j
x_j=np.vstack((T_roomj,E_j))
#selecting borders for allowed Troom
Tmax = Srs.loc[t]['Tmax']
Tmin = Srs.loc[t]['Tmin']
#selecting borders for possible energy content of heat storage E
Emax=x[1][-1]
Emin=x[1][0]
#Calculate penalty costs
costx = (x_j[0]>Tmax)*(x_j[0]-Tmax)**2*99999 + (x_j[0]<Tmin)*(x_j[0]<Tmin)**2*9999\
+(x_j[1]>Emax)*99999 + (x_j[1]<Emin)*99999\
+costx
#correcting x_j
x_j[0] = np.clip(x_j[0],x[0][0],x[0][-1])
x_j[1] = np.clip(x_j[1],x[1][0],x[1][-1])
#Calculate costs
cost = P_el * Srs.loc[t]['price_elec']*0.25 + costx
#Define results to be put in Data
data = pd.DataFrame(index = np.arange(l))
data['P_th'] = P_th
data['P_el'] = P_el
data['T_room'] = x_j[0]
data['E'] = x_j[1]
data['massflow'] = massflow
data['cost'] = cost
data['costx'] = costx
return cost, x_j, data
| gpl-3.0 |
khkaminska/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# F-scores used as a reference to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
ndawe/root_numpy | docs/sphinxext/gen_rst.py | 3 | 14352 | """
Example generation for root_numpy
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import token
import tokenize
import matplotlib
matplotlib.use('Agg')
import ROOT
ROOT.gROOT.SetBatch(True)
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
.. image:: images/%s
:scale: 50
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
:scale: 80
"""
def extract_docstring(filename):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
end_row = erow + 1 + start_row
if lines and lines[end_row - 2] == 'print(__doc__)\n':
end_row += 1
return docstring, first_par, end_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 10px;
width: auto;
height: 200px;
width: 180px;
}
.figure img {
display: inline;
}
.figure .caption {
width: 170px;
text-align: center !important;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print('Example directory %s does not have a README.txt file' % src_dir)
print('Skipping this directory')
print(80 * '_')
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
def sort_key(a):
# put last elements without a plot
if not a.startswith('plot') and a.endswith('.py'):
return 'zz' + a
return a
fhindex.write("""
.. toctree::
""")
for fname in sorted(os.listdir(src_dir), key=sort_key):
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, plot_gallery)
#thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
#fhindex.write('.. figure:: %s\n' % thumb)
#if link_name.startswith('._'):
# link_name = link_name[2:]
#if dir != '.':
# fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
# fname[:-3]))
#else:
# fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write("""
%s/%s
""" % (dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
root_image_fname = 'root_%s_%%s.png' % base_image_name
root_fig_num = 1
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
root_image_path = os.path.join(image_dir, root_image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
first_root_image_file = root_image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if (not os.path.exists(first_image_file) or
not os.path.exists(first_root_image_file) or
os.stat(first_image_file).st_mtime <=
os.stat(src_file).st_mtime):
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, and we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
for canvas in ROOT.gROOT.GetListOfCanvases():
maybe_root_filename = os.path.join(os.path.dirname(src_file), canvas.name)
if os.path.isfile(maybe_root_filename):
os.rename(maybe_root_filename, os.path.join(image_dir, canvas.name))
figure_list.append(canvas.name)
canvas.Close()
else:
canvas.SaveAs(root_image_path % root_fig_num)
canvas.Close()
figure_list.append(root_image_fname % root_fig_num)
root_fig_num += 1
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
from matplotlib import image
if os.path.exists(first_image_file):
image.thumbnail(first_image_file, thumb_file, 0.2)
elif os.path.exists(first_root_image_file):
image.thumbnail(first_root_image_file, thumb_file, 0.2)
if not os.path.exists(thumb_file):
# create something not to replace the thumbnail
shutil.copy('images/blank_image.png', thumb_file)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
| bsd-3-clause |
mtpain/metacorps | projects/viomet/vis.py | 1 | 10192 | '''
Plots for my first-year (and beyond) figurative violence in the media project.
Author: Matthew Turner <[email protected]>
Date: April 01, 2017
'''
import matplotlib.pyplot as plt
import matplotlib.dates as pltdates
import pandas as pd
import seaborn as sns
from datetime import date, datetime, timedelta
from .analysis import relative_likelihood
from projects.common.analysis import (
daily_frequency, daily_metaphor_counts
)
CUR_PAL = sns.color_palette()
# for 8.5x11 paper
DEFAULT_FIGSIZE = (7.5, 5)
def by_network_frequency_figure(
frequency_df,
date_range=pd.date_range(
'2016-09-01', '2016-11-30', freq='D'
),
iatv_corpus_name=None,
freq=True,
partition_infos=None,
font_scale=1.15,
save_path=None):
sns.axes_style("darkgrid")
sns.set(font_scale=font_scale)
CUR_PAL = sns.color_palette()
df = frequency_df
# fits are not being shown for this condition
if (partition_infos is None):
if freq:
network_freq = daily_frequency(
df, date_range, iatv_corpus_name, by=['network']
)
network_freq.plot(style='o')
else:
full_df = daily_metaphor_counts(
df, ['network'], date_range
)[['MSNBCW', 'CNNW', 'FOXNEWSW']]
full_df.plot(style='o')
# show fits TODO Include more arguments so that fits don't have to be
# generated just to plot. Generate fits outside and pass fits in.
else:
if freq:
# put networks in desired order, left to right
networks = ['MSNBCW', 'CNNW', 'FOXNEWSW']
network_freq = daily_frequency(
df, date_range, iatv_corpus_name, by=['network']
)
ax = network_freq[networks].plot(
style='o', ms=14, alpha=0.5, legend=False,
figsize=DEFAULT_FIGSIZE
)
for net_idx, network in enumerate(networks):
pinfo = partition_infos[network]
day_td = timedelta(seconds=60)
d0 = date_range[0]
d1 = pinfo.partition_date_1 - day_td
d2 = pinfo.partition_date_1
d3 = pinfo.partition_date_2
d4 = pinfo.partition_date_2 + day_td
d5 = date_range[-1]
fg = pinfo.f_ground
fe = pinfo.f_excited
dates = pd.DatetimeIndex([d0, d1, d2, d3, d4, d5])
datas = [fg, fg, fe, fe, fg, fg]
network_formatted = ['MSNBC', 'CNN', 'Fox News']
pd.Series(
index=dates, data=datas
).plot(
lw=8, ax=ax, ls='-', color=CUR_PAL[net_idx], alpha=0.9,
legend=True, label=network_formatted[net_idx]
)
ax.xaxis.set_minor_formatter(pltdates.DateFormatter('%-d'))
ax.xaxis.set_minor_locator(pltdates.DayLocator(bymonthday=(1, 15)))
ax.grid(which='minor', axis='x')
ax.set_xlabel('Date')
ax.set_ylabel('Frequency of usage')
ax.set_title(
'Metaphorical violence usage on each of the three networks'
)
plt.tight_layout()
if save_path is not None:
fig = ax.get_figure()
fig.savefig(save_path)
plt.close()
# FIGURE 1
def plot_daily_usage(df, ma_period=7, lw=3, marker='o', ms=10, xlab='Date',
ylab='Figurative uses',
title='Figurative uses of violent words'):
'''
Plot daily and `ma_period`-day moving average from dataframe with index
of every date in observation period (currently Sep 1 - Nov 30)
'''
sns.set(font_scale=1.75)
# calculate the moving average over ma_period
ma = df.rolling(ma_period).mean().fillna(0)
ax = ma.plot(lw=lw, figsize=(15, 12))
df.plot(marker='o', ms=ms, lw=0, ax=ax, color=CUR_PAL)
p, _ = ax.get_legend_handles_labels()
leg = ax.legend(p, ['attack ({}-day MA)'.format(ma_period), 'beat', 'hit',
'attack (daily)', 'beat', 'hit'], frameon=True)
leg.get_frame().set_facecolor('white')
# XXX magic number gotten from ipython session
plt.xlim([17044, plt.xlim()[1]])
plt.ylim([0, 17])
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
ax.axvline(date(2016, 9, 26), color='k', lw=3, ls='--')
ax.axvline(date(2016, 10, 9), color='k', lw=3, ls='--')
ax.axvline(date(2016, 10, 19), color='k', lw=3, ls='--')
ax.axvline(date(2016, 11, 8), color='k', lw=3, ls='--')
plt.tight_layout()
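# Illustrative call (assumes a DataFrame indexed by date with the 'attack', 'beat'
# and 'hit' columns named in the legend of plot_daily_usage):
# plot_daily_usage(daily_counts_df[['attack', 'beat', 'hit']], ma_period=7)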
def plot_total_daily_usage(series, ma_period=5, lw=3, marker='o', ms=6,
xlab='Date', ylab='Frequency of uses',
plot_ma=False,
show_debates=False, show_election=False,
show_means=False,
save_path=None,
title='Figurative violence usage during debate season'):
sns.set(font_scale=1.5)
fig = plt.figure(figsize=(16, 9))
ax = series.plot(style='ok', markerfacecolor="None", markeredgecolor="black",
markeredgewidth=5, lw=0, ms=ms)
if plot_ma:
ma = series.rolling(ma_period).mean().fillna(0)
ax = ma.plot(style='k-', lw=lw, ax=ax, figsize=(15, 12))
p, _ = ax.get_legend_handles_labels()
leg = ax.legend(p, ['frequency of usage ({}-day) MA'.format(ma_period),
'frequency of usage per day'
]
)
# else:
# p, _ = ax.get_legend_handles_labels()
# leg = ax.legend(p, ['frequency of usage per day'])
# leg.get_frame().set_facecolor('white')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
if show_debates:
ax.axvline(date(2016, 9, 26), color='#cc0000', lw=3, ls='--')
ax.axvline(date(2016, 10, 9), color='#cc0000', lw=3, ls='--')
ax.axvline(date(2016, 10, 19), color='#cc0000', lw=3, ls='--')
plt.text(date(2016, 9, 25), 6, 'First Debate\nSeptember 26',
horizontalalignment='right', color='#cc0000')
plt.text(date(2016, 10, 8), 6.5, 'Second Debate\nOctober 9',
horizontalalignment='right', color='#cc0000')
plt.text(date(2016, 10, 20), 6, 'Third Debate\nOctober 19',
horizontalalignment='left', color='#cc0000')
if show_election:
ax.axvline(date(2016, 11, 8), color='#cc0000', lw=3, ls='--')
if show_means:
ax.plot(
[date(2016, 9, 1), date(2016, 9, 24)], [1, 1],
lw=5, color='#cc0000'
)
ax.plot(
[date(2016, 9, 25), date(2016, 10, 26)], [2.7, 2.7],
lw=5, color='#cc0000'
)
ax.plot(
[date(2016, 10, 27), date(2016, 11, 30)], [1, 1],
lw=5, color='#cc0000'
)
ax.xaxis.set_major_formatter(pltdates.DateFormatter('%m/%d/%Y'))
ax.xaxis.set_major_locator(pltdates.DayLocator(bymonthday=(1, 15)))
plt.tight_layout()
if save_path is not None:
fig.savefig(save_path)
plt.close()
return ax
# FIGURE 2
def aic_heatmap(df, relative=False, lw=2, annot=True, lim_ticklabels=False,
title='', save_path=None):
'''
heatmap demonstrating relative likelihood of each model minimizing
information loss
Arguments:
df (pandas.DataFrame): data frame with the first and second
partition dates and an AIC column if relative=False and a
rl column if relative=True
relative (bool): see above
Returns:
(matplotlib.pyplot.Axes): Axes plotted to
'''
sns.set(font_scale=1.2)
cbar_kws = dict(label='Relative Likelihood, $\mathcal{L}_i$', size=16)
fig = plt.figure(figsize=(16, 9))
if relative:
val_col = 'rl'
fmt = '.2f'
df_rel = df.copy()
# min_aic = df_rel.AIC.min()
# df_rel = df_rel.rename(columns={'AIC': val_col})
# df_rel[val_col] = relative_likelihood(min_aic, df_rel[val_col])
if lim_ticklabels:
ax = sns.heatmap(
df_rel.pivot('first_date', 'last_date', val_col),
annot=annot, fmt=fmt, linewidths=lw, xticklabels=5,
yticklabels=5
)
else:
ax = sns.heatmap(
df_rel.pivot('first_date', 'last_date', val_col),
annot=annot, fmt=fmt, linewidths=lw
)
cbar = plt.gcf().axes[-1] # .colorbar(df_rel[val_col].values)
cbar.tick_params(labelsize=14)
cbar.set_title('Relative Likelihood, $\mathcal{L}_i$\n',
size=18, loc='left')
    else:
        val_col = 'AIC'
        fmt = '1.0f'
        df_rel = df.copy()
if lim_ticklabels:
ax = sns.heatmap(
df_rel.pivot('first_date', 'last_date', val_col),
annot=annot, fmt=fmt, linewidths=lw, xticklabels=5,
yticklabels=5, cbar_kws=cbar_kws
)
else:
ax = sns.heatmap(
df_rel.pivot('first_date', 'last_date', val_col),
annot=annot, fmt=fmt, linewidths=lw, cbar_kws=cbar_kws
)
ax.set_ylabel('Date frequency increased', size=20)
ax.set_xlabel('Date frequency returned to normal', size=20)
yt_labels = df_rel.first_date.dt.strftime('%m-%d').iloc[
[int(yt) for yt in ax.get_yticks()]
]
ax.set_yticklabels(yt_labels, rotation=0, size=15)
dates = df_rel.last_date.unique()[[int(xt) for xt in ax.get_xticks()]]
xt_labels = [
dt.astype(datetime).strftime('%m-%d') for dt in dates
]
ax.set_xticklabels(xt_labels, rotation=-30, ha='left', size=15)
ax.invert_yaxis()
plt.title(title, size=22)
plt.tight_layout()
if save_path is not None:
fig.savefig(save_path)
plt.close()
return ax
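# --- Illustrative usage sketch (added; not part of the original module) ---
# aic_heatmap expects one row per candidate (first_date, last_date) partition,
# with an 'AIC' column when relative=False or an 'rl' column when relative=True
# (see the docstring above).  The values below are made up:
#
#     model_grid = pd.DataFrame({
#         'first_date': pd.to_datetime(['2016-09-20', '2016-09-20']),
#         'last_date':  pd.to_datetime(['2016-10-25', '2016-11-01']),
#         'rl':         [1.00, 0.35],
#     })
#     aic_heatmap(model_grid, relative=True, save_path='figure2.png')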
| bsd-3-clause |
hainm/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 57 | 16523 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False) | bsd-3-clause |
nsoojin/coursera-ml-py | machine-learning-ex7/ex7/displayData.py | 3 | 1427 | import matplotlib.pyplot as plt
import numpy as np
def display_data(x):
(m, n) = x.shape
# Set example_width automatically if not passed in
example_width = np.round(np.sqrt(n)).astype(int)
example_height = (n / example_width).astype(int)
# Compute the number of items to display
display_rows = np.floor(np.sqrt(m)).astype(int)
display_cols = np.ceil(m / display_rows).astype(int)
# Between images padding
pad = 1
# Setup blank display
    display_array = - np.ones((pad + display_rows * (example_height + pad),
                              pad + display_cols * (example_width + pad)))
# Copy each example into a patch on the display array
curr_ex = 0
for j in range(display_rows):
for i in range(display_cols):
            if curr_ex >= m:
break
# Copy the patch
# Get the max value of the patch
max_val = np.max(np.abs(x[curr_ex]))
display_array[pad + j * (example_height + pad) + np.arange(example_height),
pad + i * (example_width + pad) + np.arange(example_width)[:, np.newaxis]] = \
x[curr_ex].reshape((example_height, example_width)) / max_val
curr_ex += 1
        if curr_ex >= m:
break
# Display image
plt.figure()
plt.imshow(display_array, cmap='gray', extent=[-1, 1, -1, 1])
plt.axis('off')
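# --- Illustrative usage sketch (added; not part of the original exercise) ---
# Tile 25 random 20x20 "images" (stored as 400-element row vectors) into a grid.
if __name__ == '__main__':
    display_data(np.random.rand(25, 400))
    plt.show()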
| mit |
AlCap23/Thesis | Python/Experiments/SISO/test.py | 1 | 4259 | '''
Use this script from terminal / console with
./python sisostudy.py --file_storage=my_runs
Will create an output with all the necessary information
'''
# Import the pacakges
# Numpy for numerical methods
import numpy as np
# Python Control for SISO creation etc.
import control as cn
# Pandas for Data Storage
import pandas as pd
# Import the Algorithms
import sys
sys.path.append('../../')
import Algorithms as alg
# Define an experiment
from sacred import Experiment
ex = Experiment()
# Configuration
@ex.config
def experimental_setup():
# Set up the Experiment and define the range of system gain, lag and delay as well as filename etc
filename = 'sisostudy.csv'
# Sample size per system order
sample_size = 10
# Maximum System Order
max_order = 4
# System noise
noise_limit = 0
# Gain Limits
gain_limits = [0.1,10]
# Lag Limits
lag_limits = [1,200]
# Delay Limits
delay_limits = [1e-2, 1e-3]
# Create the system numerator
N = np.random.uniform(gain_limits[0],gain_limits[1],(max_order,sample_size))
# Create the system denominator
D = np.random.uniform(lag_limits[0],lag_limits[1],(max_order,sample_size))
# Create the system delay
L = np.random.uniform(delay_limits[0], delay_limits[1],(max_order,sample_size))
# Create an array for the results: sys_no, order, K,T, k,t,l, ms_real,ms_ideal, 4x t_rise, mp, t_settle, yss
columns = ['Sample No.', 'Order', 'K', 'T', 'KM', 'TM', 'LM', 'MS Real', 'MS Ideal', 'Tr_RSW', 'Mp_RSW', 'Ts_RSW', 'Ys_RSW', 'Tr_R', 'Mp_R', 'Ts_R', 'Ys_R', 'Tr_ISW', 'Mp_ISW', 'Ts_ISW', 'Ys_ISW', 'Tr_I', 'Mp_I', 'Ts_I', 'Ys_I']
R = pd.DataFrame(columns=columns)
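# Note (added for clarity): sacred injects the names defined in the config function
# above (N, D, L, R, noise_limit, sample_size, max_order, filename, columns) into the
# @ex.automain function below by matching argument names, so — assuming sacred's
# standard command line interface — the study can be launched with config overrides,
# e.g.:  python test.py with sample_size=20 max_order=3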
# Experimental Study
@ex.automain
def experiment(N,D,L,R,noise_limit,sample_size,max_order,filename, columns):
# Define system no
sys_no = 0
# Define an outer loop over system order
for order in range(1,max_order):
# Define an inner loop over samples
for sample in range(0,sample_size):
# Current System
sys_no += 1
# Get the denominator
den = [D[order][sample],1]
# Get the gain
num = N[order][sample]
# Get the delay
l = L[order][sample]
# Define a Transfer Function with pole multiplicity
G = cn.tf([1],den)
# Add gain and rise system order
G = num*G**order
# Add delay with pade approximation of order 10
num, den = cn.pade(l,10)
G = G*cn.tf(num,den)
# Step response
y,t = cn.step(G)
u = np.ones_like(t)
# Identify the system
km,tm,lm = alg.Integral_Identification(y,u,t)
# Make a model of the system
num,den = cn.pade(lm,10)
GM = cn.tf([km],[tm,1])*cn.tf(num,den)
# Tune AMIGO controller
params, b = alg.AMIGO_Tune(km,tm,lm)
# Define a Controller with setpoint
ky = cn.tf([params[0]],[1])+cn.tf([params[1]],[1,0])
kr = cn.tf([b*params[0]],[1])+cn.tf([params[1]],[1,0])
# REAL SYSTEM
# Real system closed loop, setpoint weight
real_clsw = cn.feedback(G,ky)*kr
# Real system closed loop, without setpoint weight
real_cl = cn.feedback(G*ky,1)
# Real system sensitivity
real_sens = 1/(1+G*ky)
# IDENTIFIED SYSTEM
# Identified system closed loop, setpoint weight
iden_clsw = cn.feedback(GM,ky)*kr
# Identified system closed loop, without setpoint weight
iden_cl = cn.feedback(GM*ky,1)
# Identified system sensitivity
iden_sens = 1/(1+GM*ky)
# Step response
y_rclsw,t_rclsw = cn.step(real_clsw)
y_rcl,t_rcl = cn.step(real_cl)
y_iclsw,t_iclsw = cn.step(iden_clsw)
y_icl, t_icl = cn.step(iden_cl)
# Compute the gain
# Define Frequency range
omega = np.logspace(-5,5,1000)
gain, phase, omega = cn.bode_plot(real_sens)
MS_Real = np.max(gain)
gain, phase, omega = cn.bode_plot(iden_sens)
MS_Iden = np.max(gain)
# Get the Step Information
Tr_RSW, Mp_RSW, Ts_RSW, Ys_RSW = alg.Step_Info(y_rclsw,t_rclsw)
Tr_R, Mp_R, Ts_R, Ys_R = alg.Step_Info(y_rcl,t_rcl)
Tr_ISW, Mp_ISW, Ts_ISW, Ys_ISW = alg.Step_Info(y_iclsw,t_iclsw)
Tr_I, Mp_I, Ts_I, Ys_I = alg.Step_Info(y_icl,t_icl)
# Append Data
R.loc[sys_no-1] = [sys_no, order, N[order][sample], D[order][sample], km, tm, lm, MS_Real, MS_Iden, Tr_RSW, Mp_RSW, Ts_RSW, Ys_RSW, Tr_R, Mp_R, Ts_R, Ys_R, Tr_ISW, Mp_ISW, Ts_ISW, Ys_ISW, Tr_I, Mp_I, Ts_I, Ys_I]
R.to_csv(filename, sep=';')
| gpl-3.0 |
phoebe-project/phoebe2-docs | 2.1/tutorials/vgamma.py | 1 | 9133 | #!/usr/bin/env python
# coding: utf-8
# Systemic Velocity
# ============================
#
# **NOTE:** the definition of the systemic velocity has been flipped between 2.0.x and 2.1.0+ to adhere to usual conventions. If importing a file from PHOEBE 2.0.x, the value should be flipped automatically, but if adopting an old script with non-zero systemic velocity, make sure the sign is correct.
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Now we'll create empty lc, rv, orb, and mesh datasets. We'll then look to see how the systemic velocity (vgamma) affects the observables in each of these datasets, and how those are also affected by light-time effects (ltte).
#
# To see the effects over long times, we'll compute one cycle starting at t=0, and another in the distant future.
# In[3]:
times1 = np.linspace(0,1,201)
times2 = np.linspace(90,91,201)
# In[4]:
b.add_dataset('lc', times=times1, dataset='lc1')
b.add_dataset('lc', times=times2, dataset='lc2')
# In[5]:
b.add_dataset('rv', times=times1, dataset='rv1')
b.add_dataset('rv', times=times2, dataset='rv2')
# In[6]:
b.add_dataset('orb', times=times1, dataset='orb1')
b.add_dataset('orb', times=times2, dataset='orb2')
# In[7]:
b.add_dataset('mesh', times=[0], dataset='mesh1', columns=['vws'])
b.add_dataset('mesh', times=[90], dataset='mesh2', columns=['vws'])
# Changing Systemic Velocity and LTTE
# ------------------------------------------------
# By default, vgamma is initially set to 0.0
# In[8]:
b['vgamma@system']
# We'll leave it set at 0.0 for now, and then change vgamma to see how that affects the observables.
#
# The other relevant parameter here is t0 - that is the time at which all quantities are provided, the time at which nbody integration would start (if applicable), and the time at which the center-of-mass of the system is defined to be at (0,0,0). Unless you have a reason to do otherwise, it makes sense to have this value near the start of your time data... so if we don't have any other changing quantities defined in our system and are using BJDs, we would want to set this to be non-zero. In this case, our times all start at 0, so we'll leave t0 at 0 as well.
# In[9]:
b['t0@system']
# The option to enable or disable LTTE are in the compute options, we can either set ltte or we can just temporarily pass a value when we call run_compute.
# In[10]:
b['ltte@compute']
# Let's first compute the model with 0 systemic velocity and ltte=False (not that it would matter in this case). Let's also name the model so we can keep track of what settings were used.
# In[11]:
b.run_compute(irrad_method='none', model='0_false')
# For our second model, we'll set a somewhat ridiculous value for the systemic velocity (so that the effects are exaggerated and clearly visible over one orbit), but leave ltte off.
# In[12]:
b['vgamma@system'] = 100
# In[13]:
b.run_compute(irrad_method='none', model='100_false')
# Lastly, let's leave this value of vgamma, but enable light-time effects.
# In[14]:
b.run_compute(irrad_method='none', ltte=True, model='100_true')
# Influence on Light Curves (fluxes)
# -------------------------------------------
#
# Now let's compare the various models across all our different datasets.
#
# Let's set the colors so that all figures will have systemic velocity shown in blue, systemic velocity with ltte=False in red, and systemic velocity with ltte=True in green.
#
# In[15]:
colors = {'0_false': 'b', '100_false': 'r', '100_true': 'g'}
# In each of the figures below, the first panel will be the first cycle (days 0-1) and the second panel will be 90 cycles later (days 90-91).
#
# Without light-time effects, the light curve remains unchanged by the introduction of a systemic velocity (blue and red overlap each other). However, once ltte is enabled, the time between two eclipses (ie the observed period of the system) changes. This occurs because the path between the system and observer has changed. This is an important effect to note - the period parameter sets the TRUE period of the system, not necessarily the observed period between two successive eclipses.
# In[16]:
afig, mplfig = b['lc'].plot(c=colors, linestyle='solid',
axorder={'lc1': 0, 'lc2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
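# As a rough back-of-the-envelope check (added; not part of the original tutorial): a source
# receding at vgamma has its observed period stretched by roughly a factor of (1 + vgamma/c).
# Assuming vgamma is in km/s (the default unit), the exaggerated 100 km/s used here only
# lengthens the apparent period by about 0.03%:
c_kms = 299792.458  # speed of light in km/s
print('fractional period stretch for vgamma = 100 km/s: {:.1e}'.format(100. / c_kms))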
# Influence on Radial Velocities
# ------------------------------------
#
# Radial velocities are perhaps the most logical observable in the case of systemic velocities. Introducing a non-zero value for vgamma simply offsets the observed values.
#
# Light-time will have a similar effect on RVs as it does on LCs - it simply changes the observed period.
# In[17]:
afig, mplfig = b['rv'].plot(c=colors, linestyle='solid',
axorder={'rv1': 0, 'rv2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
# Influence on Orbits (positions, velocities)
# ----------------------
# In the orbit, the addition of a systemic velocity affects both the positions and velocities. So if we plot the orbits from above (u-w plane) we can see the orbit spiral in the w-direction. Note that this actually shows the barycenter of the orbit moving - and it was only at 0,0,0 at t0. This also stresses the importance of using a reasonable t0 - here 90 days later, the barycenter has moved significantly from the center of the coordinate system.
# In[18]:
afig, mplfig = b.filter(kind='orb', model=['0_false', '100_false']).plot(x='us', y='ws',
c=colors, linestyle='solid',
axorder={'orb1': 0, 'orb2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
# Plotting the w-velocities with respect to time would show the same as the RVs, except without any Rossiter-McLaughlin like effects. Note however the flip in w-convention between vw and radial velocities (+w is defined as towards the observer to make a right-handed system, but by convention +rv is a red shift).
# In[19]:
afig, mplfig = b.filter(kind='orb', model=['0_false', '100_false']).plot(x='times', y='vws',
c=colors, linestyle='solid',
axorder={'orb1': 0, 'orb2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
# Now let's look at the effect that enabling ltte has on these same plots.
# In[20]:
afig, mplfig = b.filter(kind='orb', model=['100_false', '100_true']).plot(x='us', y='ws',
c=colors, linestyle='solid',
axorder={'orb1': 0, 'orb2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
# In[21]:
afig, mplfig = b.filter(kind='orb', model=['100_false', '100_true']).plot(x='times', y='vws',
c=colors, linestyle='solid',
axorder={'orb1': 0, 'orb2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
# Influence on Meshes
# --------------------------
# In[22]:
afig, mplfig = b.filter(kind='mesh', model=['0_false', '100_false']).plot(x='us', y='ws',
axorder={'mesh1': 0, 'mesh2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
# In[23]:
afig, mplfig = b.filter(kind='mesh', model=['100_false', '100_true']).plot(x='us', y='ws',
axorder={'mesh1': 0, 'mesh2': 1},
subplot_grid=(1,2), tight_layout=True, show=True)
# As you can see, since the center of mass of the system was at 0,0,0 at t0 - including systemic velocity actually shows the system spiraling towards or away from the observer (who is in the positive w direction). In other words - the positions of the meshes are affected in the same way as the orbits (note the offset on the ylimit scales).
#
# In addition, the actual values of vw and rv in the meshes are adjusted to include the systemic velocity.
# In[24]:
b['primary@mesh1@0_false'].get_value('vws', time=0.0)[:5]
# In[25]:
b['primary@mesh1@100_false'].get_value('vws', time=0.0)[:5]
# In[26]:
b['primary@mesh1@100_true'].get_value('vws', time=0.0)[:5]
| gpl-3.0 |
eg-zhang/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that the PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
childresslab/MicrocavityExp1 | tools/Pulse_analysis_standalone.py | 4 | 8606 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 16:35:51 2015
This file contains a class for standalone analysis of fast counter data.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (C) 2015 Nikolas Tomek [email protected]
"""
import numpy as np
from scipy import ndimage
from matplotlib.pyplot import plot
class PulseAnalysis():
def __init__(self):
self.is_counter_gated = False
# std. deviation of the gaussian filter.
#Too small and the filtered data is too noisy to analyze; too big and the pulse edges are filtered out...
self.conv_std_dev = 5
# set windows for signal and normalization of the laser pulses
self.signal_start_bin = 5
self.signal_width_bins = 200
self.norm_start_bin = 500
self.norm_width_bins = 200
# total number of laser pulses in the sequence
self.number_of_lasers = 50
# data arrays
self.tau_vector = np.array(range(50)) # tau values (x-axis)
self.signal_vector = np.zeros(self.number_of_lasers, dtype=float) # data points (y-axis)
self.laser_data = None # extracted laser pulses
def _gated_extraction(self, count_data):
""" This method detects the rising flank in the gated timetrace data and extracts just the laser pulses
@param 2D numpy.ndarray count_data: the raw timetrace data from a gated fast counter (dimensions 0: gate number, 1: time bin)
@return 2D numpy.ndarray: The extracted laser pulses of the timetrace (dimensions 0: laser number, 1: time bin)
"""
# sum up all gated timetraces to ease flank detection
timetrace_sum = np.sum(count_data, 0)
# apply gaussian filter to remove noise and compute the gradient of the timetrace sum
conv_deriv = self._convolve_derive(timetrace_sum, self.conv_std_dev)
# get indices of rising and falling flank
rising_ind = conv_deriv.argmax()
falling_ind = conv_deriv.argmin()
# slice the data array to cut off anything but laser pulses
laser_arr = count_data[:, rising_ind:falling_ind]
return laser_arr
def _ungated_extraction(self, count_data, num_of_lasers):
""" This method detects the laser pulses in the ungated timetrace data and extracts them
@param 1D numpy.ndarray count_data: the raw timetrace data from an ungated fast counter
@param int num_of_lasers: The total number of laser pulses inside the pulse sequence
@return 2D numpy.ndarray: The extracted laser pulses of the timetrace (dimensions 0: laser number, 1: time bin)
"""
# apply gaussian filter to remove noise and compute the gradient of the timetrace
conv_deriv = self._convolve_derive(count_data, self.conv_std_dev)
# initialize arrays to contain indices for all rising and falling flanks, respectively
rising_ind = np.empty([num_of_lasers],int)
falling_ind = np.empty([num_of_lasers],int)
# Find as many rising and falling flanks as there are laser pulses in the timetrace
for i in range(num_of_lasers):
# save the index of the absolute maximum of the derived timetrace as rising flank position
rising_ind[i] = np.argmax(conv_deriv)
# set this position and the sourrounding of the saved flank to 0 to avoid a second detection
if rising_ind[i] < 2*self.conv_std_dev:
del_ind_start = 0
else:
del_ind_start = rising_ind[i] - 2*self.conv_std_dev
if (conv_deriv.size - rising_ind[i]) < 2*self.conv_std_dev:
del_ind_stop = conv_deriv.size-1
else:
del_ind_stop = rising_ind[i] + 2*self.conv_std_dev
conv_deriv[del_ind_start:del_ind_stop] = 0
# save the index of the absolute minimum of the derived timetrace as falling flank position
falling_ind[i] = np.argmin(conv_deriv)
# set this position and the sourrounding of the saved flank to 0 to avoid a second detection
if falling_ind[i] < 2*self.conv_std_dev:
del_ind_start = 0
else:
del_ind_start = falling_ind[i] - 2*self.conv_std_dev
if (conv_deriv.size - falling_ind[i]) < 2*self.conv_std_dev:
del_ind_stop = conv_deriv.size-1
else:
del_ind_stop = falling_ind[i] + 2*self.conv_std_dev
conv_deriv[del_ind_start:del_ind_stop] = 0
# sort all indices of rising and falling flanks
rising_ind.sort()
falling_ind.sort()
# find the maximum laser length to use as size for the laser array
laser_length = np.max(falling_ind-rising_ind)
# initialize the empty output array
laser_arr = np.zeros([num_of_lasers, laser_length],int)
# slice the detected laser pulses of the timetrace and save them in the output array
for i in range(num_of_lasers):
if (rising_ind[i]+laser_length > count_data.size):
lenarr = count_data[rising_ind[i]:].size
laser_arr[i, 0:lenarr] = count_data[rising_ind[i]:]
else:
laser_arr[i] = count_data[rising_ind[i]:rising_ind[i]+laser_length]
return laser_arr
def _convolve_derive(self, data, std_dev):
""" This method smoothes the input data by applying a gaussian filter (convolution) with
specified standard deviation. The derivative of the smoothed data is computed afterwards and returned.
If the input data is some kind of rectangular signal containing high frequency noise,
the output data will show sharp peaks corresponding to the rising and falling flanks of the input signal.
@param 1D numpy.ndarray timetrace: the raw data to be smoothed and derived
@param float std_dev: standard deviation of the gaussian filter to be applied for smoothing
@return 1D numpy.ndarray: The smoothed and derived data
"""
conv = ndimage.filters.gaussian_filter1d(data, std_dev)
conv_deriv = np.gradient(conv)
return conv_deriv
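    # Illustrative note (added): on a noisy rectangular pulse the filtered derivative
    # returned above has a sharp positive peak at the rising edge and a sharp negative
    # peak at the falling edge, which is what the extraction methods rely on, e.g.:
    #   pulse = np.r_[np.zeros(100), np.ones(100), np.zeros(100)]
    #   deriv = self._convolve_derive(pulse, 5)
    #   deriv.argmax()   # ~100 (rising edge)
    #   deriv.argmin()   # ~200 (falling edge)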
def analyze_data(self, raw_data):
""" This method captures the fast counter data and extracts the laser pulses.
@param int num_of_lasers: The total number of laser pulses inside the pulse sequence
@return 2D numpy.ndarray: The extracted laser pulses of the timetrace (dimensions 0: laser number, 1: time bin)
@return 1D/2D numpy.ndarray: The raw timetrace from the fast counter
"""
# call appropriate laser extraction method depending on if the fast counter is gated or not.
if self.is_counter_gated:
self.laser_data = self._gated_extraction(raw_data)
else:
self.laser_data = self._ungated_extraction(raw_data, self.number_of_lasers)
#analyze data
norm_mean = np.zeros(self.number_of_lasers, dtype=float)
signal_mean = np.zeros(self.number_of_lasers, dtype=float)
# set start and stop indices for the analysis
norm_start = self.norm_start_bin
norm_end = self.norm_start_bin + self.norm_width_bins
signal_start = self.signal_start_bin
signal_end = self.signal_start_bin + self.signal_width_bins
# loop over all laser pulses and analyze them
for i in range(self.number_of_lasers):
# calculate the mean of the data in the normalization window
norm_mean[i] = self.laser_data[i][norm_start:norm_end].mean()
# calculate the mean of the data in the signal window
signal_mean[i] = (self.laser_data[i][signal_start:signal_end] - norm_mean[i]).mean()
# update the signal plot y-data
self.signal_vector[i] = 1. + (signal_mean[i]/norm_mean[i])
return
if __name__ == "__main__":
tool = PulseAnalysis()
data = np.loadtxt('FastComTec_demo_timetrace.asc')
tool.analyze_data(data)
plot(tool.tau_vector, tool.signal_vector) | gpl-3.0 |
drivendata/data-science-is-software | src/features/preprocess_solution.py | 1 | 3177 | #
#
# DON'T CHEAT!!!!!!
#
from engarde.decorators import none_missing
@none_missing()
def clean_raw_data(df):
""" Takes a dataframe and performs four steps:
- Selects columns for modeling
- For numeric variables, replaces 0 values with mean for that region
- Fills invalid construction_year values with the mean construction_year
- Converts strings to categorical variables
:param df: A raw dataframe that has been read into pandas
:returns: A dataframe with the preprocessing performed.
"""
useful_columns = ['amount_tsh',
'gps_height',
'longitude',
'latitude',
'region',
'population',
'construction_year',
'extraction_type_class',
'management_group',
'quality_group',
'source_type',
'waterpoint_type',
'status_group']
zero_is_bad_value = ['longitude', 'population']
other_bad_value = ['latitude']
# subset to columns we care about
df = df[useful_columns]
for column, column_type in df.dtypes.iteritems():
# special case construction year
if column == 'construction_year':
invalid_rows = df.construction_year < 1000
valid_mean = int(df.construction_year[~invalid_rows].mean())
df.loc[invalid_rows, column] = valid_mean
# replace 0 values where they are not right
elif column in zero_is_bad_value:
df = replace_value_with_grouped_mean(df, 0, column, 'region')
elif column in other_bad_value:
df = replace_value_with_grouped_mean(df, -2e-8, column, 'region')
# strings to categoricals
elif column_type == "object":
df.loc[:, column] = df[column].astype('category')
return df
def replace_value_with_grouped_mean(df, value, column, to_groupby):
""" For a given numeric value (e.g., 0) in a particular column, take the
mean of column (excluding value) grouped by to_groupby and return that
column with the value replaced by that mean.
:param df: The dataframe to operate on.
:param value: The value in column that should be replaced.
:param column: The column in which replacements need to be made.
:param to_groupby: Groupby this variable and take the mean of column.
Replace value with the group's mean.
:returns: The data frame with the invalid values replaced
"""
invalid_mask = (df[column] == value)
# get the mean without the invalid value
means_by_group = (df[~invalid_mask]
.groupby(to_groupby)[column]
.mean())
# get an array of the means for all of the data
means_array = means_by_group[df[to_groupby].values].values
# assignt the invalid values to means
df.loc[invalid_mask, column] = means_array[invalid_mask]
return df
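# --- Illustrative usage sketch (added; not part of the original solution) ---
# A tiny made-up example of replace_value_with_grouped_mean: the 0 population in
# region 'a' is replaced by the mean of the remaining 'a' rows.
if __name__ == '__main__':
    import pandas as pd
    demo = pd.DataFrame({'region': ['a', 'a', 'b', 'b'],
                         'population': [0, 100, 200, 300]})
    print(replace_value_with_grouped_mean(demo, 0, 'population', 'region'))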
| mit |
yonglehou/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
xguse/scikit-bio | skbio/sequence/_sequence.py | 2 | 78197 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range
from future.utils import viewitems
import six
import itertools
import math
import re
import collections
import copy
import numbers
import textwrap
from contextlib import contextmanager
import numpy as np
from scipy.spatial.distance import hamming
import pandas as pd
from skbio._base import SkbioObject
from skbio.sequence._base import ElasticLines
from skbio.util._misc import chunk_str
from skbio.util._decorator import stable, experimental
class Sequence(collections.Sequence, SkbioObject):
"""Store biological sequence data and optional associated metadata.
``Sequence`` objects do not enforce an alphabet and are thus the most
generic objects for storing biological sequence data. Subclasses ``DNA``,
``RNA``, and ``Protein`` enforce the IUPAC character set [1]_ for, and
provide operations specific to, each respective molecule type.
``Sequence`` objects consist of the underlying sequence data, as well
as optional metadata and positional metadata. The underlying sequence
    is immutable, while the metadata and positional metadata are mutable.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the biological sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence. A shallow copy
of the ``dict`` will be made (see Examples section below for details).
positional_metadata : pd.DataFrame consumable, optional
Arbitrary per-character metadata (e.g., sequence read quality
scores). Must be able to be passed directly to ``pd.DataFrame``
constructor. Each column of metadata must be the same length as the
biological sequence. A shallow copy of the positional metadata will be
made if necessary (see Examples section below for details).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters. If ``False``, no characters will be converted.
If a str, it will be treated as a key into the positional metadata of
the object. All lowercase characters will be converted to uppercase,
and a ``True`` value will be stored in a boolean array in the
positional metadata under the key.
Attributes
----------
values
metadata
positional_metadata
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
**Creating sequences:**
Create a sequence without any metadata:
>>> seq = Sequence('GGUCGUGAAGGA')
>>> seq
Sequence
---------------
Stats:
length: 12
---------------
0 GGUCGUGAAG GA
Create a sequence with metadata and positional metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> positional_metadata = {'quality': [3, 3, 4, 10],
... 'exons': [True, True, False, True]}
>>> seq = Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'seq-id'
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
**Retrieving underlying sequence data:**
Retrieve underlying sequence:
>>> seq.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'C', b'G', b'T'],
dtype='|S1')
    The underlying sequence data is immutable:
>>> seq.values = np.array([b'T', b'C', b'G', b'A'], dtype='|S1')
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> seq.values[0] = b'T'
Traceback (most recent call last):
...
ValueError: assignment destination is read-only
**Retrieving sequence metadata:**
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 4
3 True 10
**Updating sequence metadata:**
.. warning:: Be aware that a shallow copy of ``metadata`` and
``positional_metadata`` is made for performance. Since a deep copy is
not made, changes made to mutable Python objects stored as metadata may
affect the metadata of other ``Sequence`` objects or anything else that
shares a reference to the object. The following examples illustrate this
behavior.
First, let's create a sequence and update its metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> seq = Sequence('ACGT', metadata=metadata)
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'new-id', 'pubmed': 12345}
Note that the original metadata dictionary (stored in variable
``metadata``) hasn't changed because a shallow copy was made:
>>> pprint(metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
>>> seq.metadata == metadata
False
Note however that since only a *shallow* copy was made, updates to mutable
objects will also change the original metadata dictionary:
>>> seq.metadata['authors'].append('Bob')
>>> seq.metadata['authors']
['Alice', 'Bob']
>>> metadata['authors']
['Alice', 'Bob']
This behavior can also occur when manipulating a sequence that has been
derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'new-id'
'pubmed': 12345
Stats:
length: 2
-----------------------------
0 CG
>>> pprint(subseq.metadata)
{'authors': ['Alice', 'Bob'],
'desc': 'seq desc',
'id': 'new-id',
'pubmed': 12345}
The subsequence has inherited the metadata of its parent sequence. If we
update the subsequence's author list, we see the changes propagated in the
parent sequence and original metadata dictionary:
>>> subseq.metadata['authors'].append('Carol')
>>> subseq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> seq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> metadata['authors']
['Alice', 'Bob', 'Carol']
The behavior for updating positional metadata is similar. Let's create a
new sequence with positional metadata that is already stored in a
``pd.DataFrame``:
>>> positional_metadata = pd.DataFrame(
... {'quality': [3, 3, 4, 10], 'list': [[], [], [], []]})
>>> seq = Sequence('ACGT', positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
>>> seq.positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
Now let's update the sequence's positional metadata by adding a new column
and changing a value in another column:
>>> seq.positional_metadata['gaps'] = [False, False, False, False]
>>> seq.positional_metadata.loc[0, 'quality'] = 999
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [] 3 False
2 [] 4 False
3 [] 10 False
Note that the original positional metadata (stored in variable
``positional_metadata``) hasn't changed because a shallow copy was made:
>>> positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
>>> seq.positional_metadata.equals(positional_metadata)
False
Next let's create a sequence that has been derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
'gaps': <dtype: bool>
Stats:
length: 2
-----------------------------
0 CG
>>> subseq.positional_metadata
list quality gaps
0 [] 3 False
1 [] 4 False
As described above for metadata, since only a *shallow* copy was made of
the positional metadata, updates to mutable objects will also change the
parent sequence's positional metadata and the original positional metadata
``pd.DataFrame``:
>>> subseq.positional_metadata.loc[0, 'list'].append('item')
>>> subseq.positional_metadata
list quality gaps
0 [item] 3 False
1 [] 4 False
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [item] 3 False
2 [] 4 False
3 [] 10 False
>>> positional_metadata
list quality
0 [] 3
1 [item] 3
2 [] 4
3 [] 10
"""
# ASCII is built such that the difference between uppercase and lowercase
# is the 6th bit.
_ascii_invert_case_bit_offset = 32
_ascii_lowercase_boundary = 90
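    # Illustrative note (assumed example, not from the original source):
    # XOR-ing an ASCII letter with 32 flips its case, e.g.
    # ord('a') ^ 32 == ord('A') and ord('G') ^ 32 == ord('g'); 90 is ord('Z'),
    # the boundary used below to detect lowercase bytes.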
default_write_format = 'fasta'
__hash__ = None
@property
@stable(as_of="0.4.0")
def values(self):
"""Array containing underlying sequence characters.
Notes
-----
This property is not writeable.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AACGA')
>>> s.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'A', b'C', b'G', b'A'],
dtype='|S1')
"""
return self._bytes.view('|S1')
@property
@stable(as_of="0.4.0")
def metadata(self):
"""``dict`` containing metadata which applies to the entire sequence.
Notes
-----
This property can be set and deleted.
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
Create a sequence with metadata:
>>> s = Sequence('ACGTACGTACGTACGT',
... metadata={'id': 'seq-id',
... 'description': 'seq description'})
>>> s
Sequence
------------------------------------
Metadata:
'description': 'seq description'
'id': 'seq-id'
Stats:
length: 16
------------------------------------
0 ACGTACGTAC GTACGT
Retrieve metadata:
>>> pprint(s.metadata) # using pprint to display dict in sorted order
{'description': 'seq description', 'id': 'seq-id'}
Update metadata:
>>> s.metadata['id'] = 'new-id'
>>> s.metadata['pubmed'] = 12345
>>> pprint(s.metadata)
{'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
Set metadata:
>>> s.metadata = {'abc': 123}
>>> s.metadata
{'abc': 123}
Delete metadata:
>>> s.has_metadata()
True
>>> del s.metadata
>>> s.metadata
{}
>>> s.has_metadata()
False
"""
if self._metadata is None:
# not using setter to avoid copy
self._metadata = {}
return self._metadata
@metadata.setter
def metadata(self, metadata):
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict")
# shallow copy
self._metadata = metadata.copy()
@metadata.deleter
def metadata(self):
self._metadata = None
@property
@stable(as_of="0.4.0")
def positional_metadata(self):
"""``pd.DataFrame`` containing metadata on a per-character basis.
Notes
-----
This property can be set and deleted.
Examples
--------
Create a DNA sequence with positional metadata:
>>> from skbio import DNA
>>> seq = DNA(
... 'ACGT',
... positional_metadata={'quality': [3, 3, 20, 11],
... 'exons': [True, True, False, True]})
>>> seq
DNA
-----------------------------
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
-----------------------------
0 ACGT
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 20
3 True 11
Update positional metadata:
>>> seq.positional_metadata['gaps'] = seq.gaps()
>>> seq.positional_metadata
exons quality gaps
0 True 3 False
1 True 3 False
2 False 20 False
3 True 11 False
Set positional metadata:
>>> seq.positional_metadata = {'degenerates': seq.degenerates()}
>>> seq.positional_metadata
degenerates
0 False
1 False
2 False
3 False
Delete positional metadata:
>>> seq.has_positional_metadata()
True
>>> del seq.positional_metadata
>>> seq.positional_metadata
Empty DataFrame
Columns: []
Index: [0, 1, 2, 3]
>>> seq.has_positional_metadata()
False
"""
if self._positional_metadata is None:
# not using setter to avoid copy
self._positional_metadata = pd.DataFrame(
index=np.arange(len(self)))
return self._positional_metadata
@positional_metadata.setter
def positional_metadata(self, positional_metadata):
try:
# copy=True to copy underlying data buffer
positional_metadata = pd.DataFrame(positional_metadata, copy=True)
except pd.core.common.PandasError as e:
raise TypeError('Positional metadata invalid. Must be consumable '
'by pd.DataFrame. Original pandas error message: '
'"%s"' % e)
num_rows = len(positional_metadata.index)
if num_rows != len(self):
raise ValueError(
"Number of positional metadata values (%d) must match the "
"number of characters in the sequence (%d)." %
(num_rows, len(self)))
positional_metadata.reset_index(drop=True, inplace=True)
self._positional_metadata = positional_metadata
@positional_metadata.deleter
def positional_metadata(self):
self._positional_metadata = None
@property
def _string(self):
return self._bytes.tostring()
@stable(as_of="0.4.0")
def __init__(self, sequence, metadata=None, positional_metadata=None,
lowercase=False):
if isinstance(sequence, np.ndarray):
if sequence.dtype == np.uint8:
self._set_bytes_contiguous(sequence)
elif sequence.dtype == '|S1':
sequence = sequence.view(np.uint8)
# Guarantee the sequence is an array (might be scalar before
# this).
if sequence.shape == ():
sequence = np.array([sequence], dtype=np.uint8)
self._set_bytes_contiguous(sequence)
else:
raise TypeError(
"Can only create sequence from numpy.ndarray of dtype "
"np.uint8 or '|S1'. Invalid dtype: %s" %
sequence.dtype)
elif isinstance(sequence, Sequence):
# we're not simply accessing sequence.metadata in order to avoid
# creating "empty" metadata representations on both sequence
# objects if they don't have metadata. same strategy is used below
# for positional metadata
if metadata is None and sequence.has_metadata():
metadata = sequence.metadata
if (positional_metadata is None and
sequence.has_positional_metadata()):
positional_metadata = sequence.positional_metadata
sequence = sequence._bytes
self._owns_bytes = False
self._set_bytes(sequence)
else:
# Python 3 will not raise a UnicodeEncodeError so we force it by
# encoding it as ascii
if isinstance(sequence, six.text_type):
sequence = sequence.encode("ascii")
s = np.fromstring(sequence, dtype=np.uint8)
# There are two possibilities (to our knowledge) at this point:
# Either the sequence we were given was something string-like,
# (else it would not have made it past fromstring), or it was a
# numpy scalar, and so our length must be 1.
if isinstance(sequence, np.generic) and len(s) != 1:
raise TypeError("Can cannot create a sequence with %r" %
type(sequence).__name__)
sequence = s
self._owns_bytes = True
self._set_bytes(sequence)
if metadata is None:
self._metadata = None
else:
self.metadata = metadata
if positional_metadata is None:
self._positional_metadata = None
else:
self.positional_metadata = positional_metadata
if lowercase is False:
pass
elif lowercase is True or isinstance(lowercase, six.string_types):
lowercase_mask = self._bytes > self._ascii_lowercase_boundary
self._convert_to_uppercase(lowercase_mask)
# If it isn't True, it must be a string_type
if not (lowercase is True):
self.positional_metadata[lowercase] = lowercase_mask
else:
raise TypeError("lowercase keyword argument expected a bool or "
"string, but got %s" % type(lowercase))
def _set_bytes_contiguous(self, sequence):
"""Munge the sequence data into a numpy array of dtype uint8."""
if not sequence.flags['C_CONTIGUOUS']:
# numpy doesn't support views of non-contiguous arrays. Since we're
# making heavy use of views internally, and users may also supply
# us with a view, make sure we *always* store a contiguous array to
# avoid hard-to-track bugs. See
# https://github.com/numpy/numpy/issues/5716
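            # For example (illustrative, not from the original source),
            # arr[::2] is a non-contiguous view of arr; np.ascontiguousarray
            # copies it into a fresh C-contiguous buffer, which is why the
            # object marks itself as owning the bytes below.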
sequence = np.ascontiguousarray(sequence)
self._owns_bytes = True
else:
self._owns_bytes = False
self._set_bytes(sequence)
def _set_bytes(self, sequence):
sequence.flags.writeable = False
self._bytes = sequence
def _convert_to_uppercase(self, lowercase):
if np.any(lowercase):
with self._byte_ownership():
self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
@stable(as_of="0.4.0")
def __contains__(self, subsequence):
"""Determine if a subsequence is contained in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
The putative subsequence.
Returns
-------
bool
Indicates whether `subsequence` is contained in the biological
sequence.
Raises
------
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
>>> 'GGU' in s
True
>>> 'CCC' in s
False
"""
return self._munge_to_bytestring(subsequence, "in") in self._string
@stable(as_of="0.4.0")
def __eq__(self, other):
"""Determine if the biological sequence is equal to another.
Biological sequences are equal if they are *exactly* the same type and
their sequence characters, metadata, and positional metadata are the
same.
Parameters
----------
other : Sequence
Sequence to test for equality against.
Returns
-------
bool
Indicates whether the biological sequence is equal to `other`.
Examples
--------
Define two biological sequences that have the same underlying sequence
of characters:
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
The two sequences are considered equal because they are the same type,
their underlying sequence of characters are the same, and their
optional metadata attributes (``metadata`` and ``positional_metadata``)
were not provided:
>>> s == t
True
>>> t == s
True
Define another biological sequence with a different sequence of
characters than the previous two biological sequences:
>>> u = Sequence('ACGA')
>>> u == t
False
Define a biological sequence with the same sequence of characters as
``u`` but with different metadata and positional metadata:
>>> v = Sequence('ACGA', metadata={'id': 'abc'},
... positional_metadata={'quality':[1, 5, 3, 3]})
The two sequences are not considered equal because their metadata and
positional metadata do not match:
>>> u == v
False
"""
# checks ordered from least to most expensive
if self.__class__ != other.__class__:
return False
# we're not simply comparing self.metadata to other.metadata in order
# to avoid creating "empty" metadata representations on the sequence
# objects if they don't have metadata. same strategy is used below for
# positional metadata
if self.has_metadata() and other.has_metadata():
if self.metadata != other.metadata:
return False
elif not (self.has_metadata() or other.has_metadata()):
# both don't have metadata
pass
else:
# one has metadata while the other does not
return False
if self._string != other._string:
return False
if self.has_positional_metadata() and other.has_positional_metadata():
if not self.positional_metadata.equals(other.positional_metadata):
return False
elif not (self.has_positional_metadata() or
other.has_positional_metadata()):
# both don't have positional metadata
pass
else:
# one has positional metadata while the other does not
return False
return True
@stable(as_of="0.4.0")
def __ne__(self, other):
"""Determine if the biological sequence is not equal to another.
Biological sequences are not equal if they are not *exactly* the same
type, or their sequence characters, metadata, or positional metadata
differ.
Parameters
----------
other : Sequence
Sequence to test for inequality against.
Returns
-------
bool
Indicates whether the biological sequence is not equal to `other`.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
>>> s != t
False
>>> u = Sequence('ACGA')
>>> u != t
True
>>> v = Sequence('ACGA', metadata={'id': 'v'})
>>> u != v
True
"""
return not (self == other)
@stable(as_of="0.4.0")
def __getitem__(self, indexable):
"""Slice the biological sequence.
Parameters
----------
indexable : int, slice, iterable (int and slice), 1D array_like (bool)
The position(s) to return from the biological sequence. If
`indexable` is an iterable of integers, these are assumed to be
indices in the sequence to keep. If `indexable` is a 1D
``array_like`` of booleans, these are assumed to be the positions
in the sequence to keep.
Returns
-------
Sequence
New biological sequence containing the position(s) specified by
`indexable` in the current biological sequence. If quality scores
are present, they will be sliced in the same manner and included in
the returned biological sequence. ID and description are also
included.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
Obtain a single character from the biological sequence:
>>> s[1]
Sequence
-------------
Stats:
length: 1
-------------
0 G
Obtain a slice:
>>> s[7:]
Sequence
-------------
Stats:
length: 5
-------------
0 AAGGA
Obtain characters at the following indices:
>>> s[[3, 4, 7, 0, 3]]
Sequence
-------------
Stats:
length: 5
-------------
0 CGAGC
Obtain characters at positions evaluating to `True`:
>>> s = Sequence('GGUCG')
        >>> index = [True, False, True, 1 == 1, False]
>>> s[index]
Sequence
-------------
Stats:
length: 3
-------------
0 GUC
"""
if (not isinstance(indexable, np.ndarray) and
((not isinstance(indexable, six.string_types)) and
hasattr(indexable, '__iter__'))):
indexable_ = indexable
indexable = np.asarray(indexable)
if indexable.dtype == object:
indexable = list(indexable_) # TODO: Don't blow out memory
if len(indexable) == 0:
# indexing with an empty list, so convert to ndarray and
# fall through to ndarray slicing below
indexable = np.asarray(indexable)
else:
seq = np.concatenate(
list(_slices_from_iter(self._bytes, indexable)))
index = _as_slice_if_single_index(indexable)
positional_metadata = None
if self.has_positional_metadata():
pos_md_slices = list(_slices_from_iter(
self.positional_metadata, index))
positional_metadata = pd.concat(pos_md_slices)
return self._to(sequence=seq,
positional_metadata=positional_metadata)
elif (isinstance(indexable, six.string_types) or
isinstance(indexable, bool)):
raise IndexError("Cannot index with %s type: %r" %
(type(indexable).__name__, indexable))
if (isinstance(indexable, np.ndarray) and
indexable.dtype == bool and
len(indexable) != len(self)):
raise IndexError("An boolean vector index must be the same length"
" as the sequence (%d, not %d)." %
(len(self), len(indexable)))
if isinstance(indexable, np.ndarray) and indexable.size == 0:
# convert an empty ndarray to a supported dtype for slicing a numpy
# array
indexable = indexable.astype(int)
seq = self._bytes[indexable]
positional_metadata = self._slice_positional_metadata(indexable)
return self._to(sequence=seq, positional_metadata=positional_metadata)
def _slice_positional_metadata(self, indexable):
if self.has_positional_metadata():
if _is_single_index(indexable):
index = _single_index_to_slice(indexable)
else:
index = indexable
return self.positional_metadata.iloc[index]
else:
return None
@stable(as_of="0.4.0")
def __len__(self):
"""Return the number of characters in the biological sequence.
Returns
-------
int
The length of the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> len(s)
4
"""
return self._bytes.size
@stable(as_of="0.4.0")
def __nonzero__(self):
"""Returns truth value (truthiness) of sequence.
Returns
-------
bool
True if length of sequence is greater than 0, else False.
Examples
--------
>>> from skbio import Sequence
>>> bool(Sequence(''))
False
>>> bool(Sequence('ACGT'))
True
"""
return len(self) > 0
@stable(as_of="0.4.0")
def __iter__(self):
"""Iterate over positions in the biological sequence.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in s:
... str(c)
'G'
'G'
'U'
'C'
"""
for i in range(len(self)):
yield self[i]
@stable(as_of="0.4.0")
def __reversed__(self):
"""Iterate over positions in the biological sequence in reverse order.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in reversed(s):
... str(c)
'C'
'U'
'G'
'G'
"""
return iter(self[::-1])
@stable(as_of="0.4.0")
def __str__(self):
"""Return biological sequence characters as a string.
Returns
-------
str
Sequence characters as a string. No metadata or positional
metadata will be included.
See Also
--------
sequence
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUAAAGGA', metadata={'id':'hello'})
>>> str(s)
'GGUCGUAAAGGA'
"""
return str(self._string.decode("ascii"))
@stable(as_of="0.4.0")
def __repr__(self):
r"""Return a string representation of the biological sequence object.
Representation includes:
* sequence type
* metadata keys and values: will display key/value if it is an
understood type, otherwise just the type will be displayed. If it is
an understood type whose representation is too long, just the type
will be displayed
* positional metadata: column names and column dtypes will be displayed
in the order they appear in the positional metadata ``pd.DataFrame``.
Column names (i.e., keys) follow the same display rules as metadata
keys
* sequence stats (e.g., length)
* up to five lines of chunked sequence data. Each line of chunked
sequence data displays the current position in the sequence
Returns
-------
str
String representation of the biological sequence object.
Notes
-----
Subclasses can override Sequence._repr_stats to provide custom
statistics.
Examples
--------
Short sequence without metadata:
>>> from skbio import Sequence
>>> Sequence('ACGTAATGGATACGTAATGCA')
Sequence
-------------------------
Stats:
length: 21
-------------------------
0 ACGTAATGGA TACGTAATGC A
Longer sequence displays first two lines and last two lines:
>>> Sequence('ACGT' * 100)
Sequence
---------------------------------------------------------------------
Stats:
length: 400
---------------------------------------------------------------------
0 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
60 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
...
300 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
360 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
Sequence with metadata and positional metadata:
>>> metadata = {
... 'id': 'seq-id',
... 'description': 'description of the sequence, wrapping across '
... 'lines if it\'s too long',
... 'authors': ['Alice', 'Bob', 'Carol'],
... 'year': 2015,
... 'published': True
... }
>>> positional_metadata = {
... 'quality': [3, 10, 11, 10],
... 'exons': [True, True, False, True]
... }
>>> Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
Sequence
----------------------------------------------------------------------
Metadata:
'authors': <class 'list'>
'description': "description of the sequence, wrapping across lines
if it's too long"
'id': 'seq-id'
'published': True
'year': 2015
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
----------------------------------------------------------------------
0 ACGT
"""
return _SequenceReprBuilder(
seq=self,
width=71, # 79 for pep8, 8 space indent for docstrings
indent=4,
chunk_size=10).build()
def _repr_stats(self):
"""Define statistics to display in the sequence's repr.
Subclasses can override this method to provide type-specific
statistics.
This method computes a single statistic: length.
Returns
-------
list
List of tuples where each tuple represents a statistic. Each tuple
contains exactly two ``str`` elements: the statistic's name/label,
and the str-formatted value of the statistic. Ordering of
statistics (i.e., list order) determines display order in the
sequence repr.
"""
return [('length', '%d' % len(self))]
@stable(as_of="0.4.0")
def __copy__(self):
"""Return a shallow copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=False)``.
"""
return self.copy(deep=False)
@stable(as_of="0.4.0")
def __deepcopy__(self, memo):
"""Return a deep copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=True)``.
"""
return self._copy(True, memo)
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the sequence contains metadata.
Returns
-------
bool
Indicates whether the sequence has metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_metadata()
False
>>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
>>> t.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the sequence contains positional metadata.
Returns
-------
bool
Indicates whether the sequence has positional metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_positional_metadata()
False
>>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
>>> t.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
@stable(as_of="0.4.0")
def copy(self, deep=False):
"""Return a copy of the biological sequence.
Parameters
----------
deep : bool, optional
Perform a deep copy. If ``False``, perform a shallow copy.
Returns
-------
Sequence
Copy of the biological sequence.
Notes
-----
Since sequence objects can share the same underlying immutable sequence
data (or pieces of it), this method can be used to create a sequence
object with its own copy of the sequence data so that the original
sequence data can be garbage-collected.
Examples
--------
Create a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT',
... metadata={'id': 'seq-id', 'authors': ['Alice']},
... positional_metadata={'quality': [7, 10, 8, 5],
... 'list': [[], [], [], []]})
Make a shallow copy of the sequence:
>>> seq_copy = seq.copy()
>>> seq_copy == seq
True
Setting new references in the copied sequence's metadata doesn't affect
the original sequence's metadata:
>>> seq_copy.metadata['id'] = 'new-id'
>>> pprint(seq_copy.metadata)
{'authors': ['Alice'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'quality'] = 999
>>> seq_copy.positional_metadata
list quality
0 [] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [] 7
1 [] 10
2 [] 8
3 [] 5
Since only a *shallow* copy was made, updates to mutable objects stored
as metadata affect the original sequence's metadata:
>>> seq_copy.metadata['authors'].append('Bob')
>>> pprint(seq_copy.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'list'].append(1)
>>> seq_copy.positional_metadata
list quality
0 [1] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
Perform a deep copy to avoid this behavior:
>>> seq_deep_copy = seq.copy(deep=True)
Updates to mutable objects no longer affect the original sequence's
metadata:
>>> seq_deep_copy.metadata['authors'].append('Carol')
>>> pprint(seq_deep_copy.metadata)
{'authors': ['Alice', 'Bob', 'Carol'], 'id': 'seq-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
Nor its positional metadata:
>>> seq_deep_copy.positional_metadata.loc[0, 'list'].append(2)
>>> seq_deep_copy.positional_metadata
list quality
0 [1, 2] 7
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
"""
return self._copy(deep, {})
def _copy(self, deep, memo):
# strategy: copy the sequence without metadata first, then set metadata
# attributes with copies. we take this approach instead of simply
# passing the metadata through the Sequence constructor because we
# don't want to copy twice (this could happen when deep=True, where we
# deep copy here and then shallow copy in the Sequence constructor). we
# also directly set the private metadata attributes instead of using
# their public setters to avoid an unnecessary copy
# we don't make a distinction between deep vs. shallow copy of bytes
# because dtype=np.uint8. we only need to make the distinction when
# dealing with object dtype
bytes = np.copy(self._bytes)
seq_copy = self._constructor(sequence=bytes, metadata=None,
positional_metadata=None)
if self.has_metadata():
metadata = self.metadata
if deep:
metadata = copy.deepcopy(metadata, memo)
else:
metadata = metadata.copy()
seq_copy._metadata = metadata
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
if deep:
positional_metadata = copy.deepcopy(positional_metadata, memo)
else:
                # pd.DataFrame.copy(deep=True) copies the underlying data
                # buffer but not the Python objects stored in it, so mutable
                # values remain shared -- i.e., the shallow-copy behavior we
                # want in this branch
positional_metadata = positional_metadata.copy(deep=True)
seq_copy._positional_metadata = positional_metadata
return seq_copy
@stable(as_of='0.4.0')
def lowercase(self, lowercase):
"""Return a case-sensitive string representation of the sequence.
Parameters
----------
lowercase: str or boolean vector
If lowercase is a boolean vector, it is used to set sequence
characters to lowercase in the output string. True values in the
boolean vector correspond to lowercase characters. If lowercase
is a str, it is treated like a key into the positional metadata,
pointing to a column which must be a boolean vector.
That boolean vector is then used as described previously.
Returns
-------
str
String representation of sequence with specified characters set to
lowercase.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> s.lowercase([True, True, False, False])
'acGT'
>>> s = Sequence('ACGT',
... positional_metadata={
... 'exons': [True, False, False, True]})
>>> s.lowercase('exons')
'aCGt'
Constructor automatically populates a column in positional metadata
when the ``lowercase`` keyword argument is provided with a column name:
>>> s = Sequence('ACgt', lowercase='introns')
>>> s.lowercase('introns')
'ACgt'
>>> s = Sequence('ACGT', lowercase='introns')
>>> s.lowercase('introns')
'ACGT'
"""
index = self._munge_to_index_array(lowercase)
outbytes = self._bytes.copy()
outbytes[index] ^= self._ascii_invert_case_bit_offset
return str(outbytes.tostring().decode('ascii'))
@stable(as_of="0.4.0")
def count(self, subsequence, start=None, end=None):
"""Count occurrences of a subsequence in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to count occurrences of.
start : int, optional
The position at which to start counting (inclusive).
end : int, optional
The position at which to stop counting (exclusive).
Returns
-------
int
Number of occurrences of `subsequence` in the biological sequence.
Raises
------
ValueError
If `subsequence` is of length 0.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCG')
>>> s.count('G')
3
>>> s.count('GG')
1
>>> s.count('T')
0
>>> s.count('G', 2, 5)
1
"""
if len(subsequence) == 0:
raise ValueError("`count` is not defined for empty subsequences.")
return self._string.count(
self._munge_to_bytestring(subsequence, "count"), start, end)
@stable(as_of="0.4.0")
def index(self, subsequence, start=None, end=None):
"""Find position where subsequence first occurs in the sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to search for in the biological sequence.
start : int, optional
The position at which to start searching (inclusive).
end : int, optional
The position at which to stop searching (exclusive).
Returns
-------
int
Position where `subsequence` first occurs in the biological
sequence.
Raises
------
ValueError
If `subsequence` is not present in the biological sequence.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT-')
>>> s.index('ACG')
2
"""
try:
return self._string.index(
self._munge_to_bytestring(subsequence, "index"), start, end)
except ValueError:
raise ValueError(
"%r is not present in %r." % (subsequence, self))
@experimental(as_of="0.4.0")
def distance(self, other, metric=None):
"""Compute the distance to another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compute the distance to.
metric : function, optional
Function used to compute the distance between the biological
sequence and `other`. If ``None`` (the default),
``scipy.spatial.distance.hamming`` will be used. This function
should take two ``skbio.Sequence`` objects and return a ``float``.
Returns
-------
float
Distance between the biological sequence and `other`.
Raises
------
ValueError
If the sequences are not the same length when `metric` is ``None``
(i.e., `metric` is ``scipy.spatial.distance.hamming``). This is
only checked when using this metric, as equal length is not a
requirement of all sequence distance metrics. In general, the
metric itself should test and give an informative error message,
but the message from ``scipy.spatial.distance.hamming`` is somewhat
cryptic (as of this writing), and it's the default metric, so we
explicitly do this check here. This metric-specific check will be
removed from this method when the ``skbio.sequence.stats`` module
is created (track progress on issue #913).
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
fraction_diff
fraction_same
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.distance(t)
0.25
>>> def custom_dist(s1, s2): return 0.42
>>> s.distance(t, custom_dist)
0.42
"""
# TODO refactor this method to accept a name (string) of the distance
# metric to apply and accept **kwargs
other = self._munge_to_sequence(other, 'distance')
if metric is None:
return self._hamming(other)
return float(metric(self, other))
def _hamming(self, other):
# Hamming requires equal length sequences. We are checking this
# here because the error you would get otherwise is cryptic.
if len(self) != len(other):
raise ValueError(
"Sequences do not have equal length. "
"Hamming distances can only be computed between "
"sequences of equal length.")
return float(hamming(self.values, other.values))
@stable(as_of="0.4.0")
def matches(self, other):
"""Find positions that match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a match
between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.matches(t)
array([ True, False, True, False], dtype=bool)
"""
other = self._munge_to_sequence(other, 'matches/mismatches')
if len(self) != len(other):
raise ValueError("Match and mismatch vectors can only be "
"generated from equal length sequences.")
return self._bytes == other._bytes
@stable(as_of="0.4.0")
def mismatches(self, other):
"""Find positions that do not match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a
mismatch between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
matches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.mismatches(t)
array([False, True, False, True], dtype=bool)
"""
return np.invert(self.matches(other))
@stable(as_of="0.4.0")
def match_frequency(self, other, relative=False):
"""Return count of positions that are the same between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of matches instead of
the count.
Returns
-------
int or float
Number of positions that are the same between the sequences. This
will be an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatch_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.match_frequency(t)
3
>>> s.match_frequency(t, relative=True)
0.75
"""
if relative:
return float(self.matches(other).mean())
else:
return int(self.matches(other).sum())
@stable(as_of="0.4.0")
def mismatch_frequency(self, other, relative=False):
"""Return count of positions that differ between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of mismatches instead of
the count.
Returns
-------
int or float
Number of positions that differ between the sequences. This will be
an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
match_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.mismatch_frequency(t)
1
>>> s.mismatch_frequency(t, relative=True)
0.25
"""
if relative:
return float(self.mismatches(other).mean())
else:
return int(self.mismatches(other).sum())
@stable(as_of="0.4.0")
def iter_kmers(self, k, overlap=True):
"""Generate kmers of length `k` from the biological sequence.
Parameters
----------
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Yields
------
Sequence
kmer of length `k` contained in the biological sequence.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT')
>>> for kmer in s.iter_kmers(4, overlap=False):
... str(kmer)
'ACAC'
'GACG'
>>> for kmer in s.iter_kmers(3, overlap=True):
... str(kmer)
'ACA'
'CAC'
'ACG'
'CGA'
'GAC'
'ACG'
'CGT'
'GTT'
"""
if k < 1:
raise ValueError("k must be greater than 0.")
if overlap:
step = 1
count = len(self) - k + 1
else:
step = k
count = len(self) // k
if self.has_positional_metadata():
for i in range(0, len(self) - k + 1, step):
yield self[i:i+k]
# Optimized path when no positional metadata
else:
kmers = np.lib.stride_tricks.as_strided(
self._bytes, shape=(k, count), strides=(1, step)).T
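            # Sketch of the strided view (illustrative values, not from the
            # original source): for bytes b'ABCDE' with k=3, overlap=True
            # (step=1, count=3), the rows after .T are b'ABC', b'BCD', b'CDE',
            # produced without copying the underlying buffer.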
for s in kmers:
yield self._to(sequence=s)
@stable(as_of="0.4.0")
def kmer_frequencies(self, k, overlap=True, relative=False):
"""Return counts of words of length `k` from the biological sequence.
Parameters
----------
k : int
The word length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
relative : bool, optional
If ``True``, return the relative frequency of each kmer instead of
its count.
Returns
-------
collections.Counter or collections.defaultdict
Frequencies of words of length `k` contained in the biological
sequence. This will be a ``collections.Counter`` if `relative` is
``False`` and a ``collections.defaultdict`` if `relative` is
``True``.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from collections import defaultdict, Counter
>>> from skbio import Sequence
>>> s = Sequence('ACACATTTATTA')
>>> freqs = s.kmer_frequencies(3, overlap=False)
>>> freqs == Counter({'TTA': 2, 'ACA': 1, 'CAT': 1})
True
>>> freqs = s.kmer_frequencies(3, relative=True, overlap=False)
>>> freqs == defaultdict(float, {'ACA': 0.25, 'TTA': 0.5, 'CAT': 0.25})
True
"""
kmers = self.iter_kmers(k, overlap=overlap)
freqs = collections.Counter((str(seq) for seq in kmers))
if relative:
if overlap:
num_kmers = len(self) - k + 1
else:
num_kmers = len(self) // k
relative_freqs = collections.defaultdict(float)
for kmer, count in viewitems(freqs):
relative_freqs[kmer] = count / num_kmers
freqs = relative_freqs
return freqs
@stable(as_of="0.4.0")
def find_with_regex(self, regex, ignore=None):
"""Generate slices for patterns matched by a regular expression.
Parameters
----------
regex : str or regular expression object
String to be compiled into a regular expression, or a pre-
compiled regular expression object (e.g., from calling
``re.compile``).
ignore : 1D array_like (bool) or iterable (slices or ints), optional
Indicate the positions to ignore when matching.
Yields
------
slice
Location where the regular expression matched.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AATATACCGGTTATAA')
>>> for match in s.find_with_regex('(TATA+)'):
... match
... str(s[match])
slice(2, 6, None)
'TATA'
slice(11, 16, None)
'TATAA'
"""
if isinstance(regex, six.string_types):
regex = re.compile(regex)
lookup = np.arange(len(self))
if ignore is None:
string = str(self)
else:
ignore = self._munge_to_index_array(ignore)
lookup = np.delete(lookup, ignore)
string = str(self[lookup])
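        # Note (added for clarity): when positions are ignored, matching runs
        # on the reduced string and `lookup` maps coordinates in that reduced
        # string back to positions in the full sequence.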
for match in regex.finditer(string):
# We start at 1 because we don't want the group that contains all
# other groups.
for g in range(1, len(match.groups())+1):
yield slice(lookup[match.start(g)],
lookup[match.end(g) - 1] + 1)
@stable(as_of="0.4.0")
def iter_contiguous(self, included, min_length=1, invert=False):
"""Yield contiguous subsequences based on `included`.
Parameters
----------
included : 1D array_like (bool) or iterable (slices or ints)
`included` is transformed into a flat boolean vector where each
position will either be included or skipped. All contiguous
included positions will be yielded as a single region.
min_length : int, optional
The minimum length of a subsequence for it to be yielded.
Default is 1.
invert : bool, optional
Whether to invert `included` such that it describes what should be
skipped instead of included. Default is False.
Yields
------
Sequence
Contiguous subsequence as indicated by `included`.
Notes
-----
If slices provide adjacent ranges, then they will be considered the
same contiguous subsequence.
Examples
--------
Here we use `iter_contiguous` to find all of the contiguous ungapped
sequences using a boolean vector derived from our DNA sequence.
>>> from skbio import DNA
>>> s = DNA('AAA--TT-CCCC-G-')
>>> no_gaps = ~s.gaps()
>>> for ungapped_subsequence in s.iter_contiguous(no_gaps,
... min_length=2):
... print(ungapped_subsequence)
AAA
TT
CCCC
Note how the last potential subsequence was skipped because it would
have been smaller than our `min_length` which was set to 2.
We can also use `iter_contiguous` on a generator of slices as is
produced by `find_motifs` (and `find_with_regex`).
>>> from skbio import Protein
>>> s = Protein('ACDFNASANFTACGNPNRTESL')
>>> for subseq in s.iter_contiguous(s.find_motifs('N-glycosylation')):
... print(subseq)
NASANFTA
NRTE
Note how the first subsequence contains two N-glycosylation sites. This
happened because they were contiguous.
"""
idx = self._munge_to_index_array(included)
if invert:
idx = np.delete(np.arange(len(self)), idx)
# Adapted from http://stackoverflow.com/a/7353335/579416
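        # Worked example (illustrative values): with idx = [0, 1, 2, 5, 6],
        # np.diff(idx) != 1 only at the 2 -> 5 jump, so np.split yields
        # [0, 1, 2] and [5, 6] -- one array per contiguous run of positions.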
for contig in np.split(idx, np.where(np.diff(idx) != 1)[0] + 1):
r = self[contig]
if len(r) >= min_length:
yield r
def _to(self, sequence=None, metadata=None, positional_metadata=None):
"""Return a copy of the current biological sequence.
Returns a copy of the current biological sequence, optionally with
updated attributes specified as keyword arguments.
Arguments are the same as those passed to the ``Sequence`` constructor.
The returned copy will have its attributes updated based on the
arguments. If an attribute is missing, the copy will keep the same
attribute as the current biological sequence. Valid attribute names
are `'sequence'`, `'metadata'`, and `'positional_metadata'`. Default
behavior is to return a copy of the current biological sequence
without changing any attributes.
Parameters
----------
sequence : optional
metadata : optional
positional_metadata : optional
Returns
-------
Sequence
Copy of the current biological sequence, optionally with updated
attributes based on arguments. Will be the same type as the current
biological sequence (`self`).
Notes
-----
By default, `metadata` and `positional_metadata` are shallow-copied and
the reference to `sequence` is used (without copying) for efficiency
since `sequence` is immutable. This differs from the behavior of
`Sequence.copy`, which will actually copy `sequence`.
This method is the preferred way of creating new instances from an
existing biological sequence, instead of calling
``self.__class__(...)``, as the latter can be error-prone (e.g.,
it's easy to forget to propagate attributes to the new instance).
"""
if sequence is None:
sequence = self._bytes
if metadata is None:
metadata = self._metadata
if positional_metadata is None:
positional_metadata = self._positional_metadata
return self._constructor(sequence=sequence, metadata=metadata,
positional_metadata=positional_metadata)
def _constructor(self, **kwargs):
return self.__class__(**kwargs)
def _munge_to_index_array(self, sliceable):
"""Return an index array from something isomorphic to a boolean vector.
"""
if isinstance(sliceable, six.string_types):
if sliceable in self.positional_metadata:
if self.positional_metadata[sliceable].dtype == np.bool:
sliceable = self.positional_metadata[sliceable]
else:
raise TypeError("Column '%s' in positional metadata does "
"not correspond to a boolean vector" %
sliceable)
else:
raise ValueError("No positional metadata associated with key "
"'%s'" % sliceable)
if not hasattr(sliceable, 'dtype') or (hasattr(sliceable, 'dtype') and
sliceable.dtype == 'object'):
sliceable = tuple(sliceable)
bool_mode = False
int_mode = False
for s in sliceable:
if isinstance(s, (bool, np.bool_)):
bool_mode = True
elif isinstance(s, (slice, int, np.signedinteger)) or (
hasattr(s, 'dtype') and s.dtype != np.bool):
int_mode = True
else:
raise TypeError("Invalid type in iterable: %s, must be one"
" of {bool, int, slice, np.signedinteger}"
% s.__class__.__name__)
if bool_mode and int_mode:
raise TypeError("Cannot provide iterable of both bool and"
" int.")
sliceable = np.r_[sliceable]
if sliceable.dtype == np.bool:
if sliceable.size != len(self):
raise ValueError("Boolean array (%d) does not match length of"
" sequence (%d)."
% (sliceable.size, len(self)))
normalized, = np.where(sliceable)
else:
normalized = np.bincount(sliceable)
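            # Sketch of the check (illustrative values): an input such as
            # (1, 3, slice(5, 7)) becomes [1, 3, 5, 6] after np.r_, and
            # np.bincount gives [0, 1, 0, 1, 0, 1, 1]; a count > 1 would mean
            # two index regions covered the same position.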
if np.any(normalized > 1):
raise ValueError("Overlapping index regions are not allowed.")
normalized, = np.where(normalized)
if np.any(normalized != sliceable):
raise ValueError("Index regions are out of order.")
return normalized
def _munge_to_sequence(self, other, method):
if isinstance(other, Sequence):
if type(other) != type(self):
raise TypeError("Cannot use %s and %s together with `%s`" %
(self.__class__.__name__,
other.__class__.__name__, method))
else:
return other
# We don't use self.__class__ or self._constructor here because we want
# to construct the most general type of Sequence object in order to
# avoid validation errors.
return Sequence(other)
def _munge_to_bytestring(self, other, method):
if type(other) is bytes:
return other
elif isinstance(other, six.string_types):
return other.encode('ascii')
else:
return self._munge_to_sequence(other, method)._string
@contextmanager
def _byte_ownership(self):
if not self._owns_bytes:
self._bytes = self._bytes.copy()
self._owns_bytes = True
self._bytes.flags.writeable = True
yield
self._bytes.flags.writeable = False
def _single_index_to_slice(start_index):
end_index = None if start_index == -1 else start_index+1
return slice(start_index, end_index)
def _is_single_index(index):
return (isinstance(index, numbers.Integral) and
not isinstance(index, bool))
def _as_slice_if_single_index(indexable):
if _is_single_index(indexable):
return _single_index_to_slice(indexable)
else:
return indexable
def _slices_from_iter(array, indexables):
for i in indexables:
if isinstance(i, slice):
pass
elif _is_single_index(i):
i = _single_index_to_slice(i)
else:
raise IndexError("Cannot slice sequence from iterable "
"containing %r." % i)
yield array[i]
class _SequenceReprBuilder(object):
"""Build a ``Sequence`` repr.
Parameters
----------
seq : Sequence
Sequence to repr.
width : int
Maximum width of the repr.
indent : int
Number of spaces to use for indented lines.
chunk_size: int
Number of characters in each chunk of a sequence.
"""
def __init__(self, seq, width, indent, chunk_size):
self._seq = seq
self._width = width
self._indent = ' ' * indent
self._chunk_size = chunk_size
def build(self):
lines = ElasticLines()
cls_name = self._seq.__class__.__name__
lines.add_line(cls_name)
lines.add_separator()
if self._seq.has_metadata():
lines.add_line('Metadata:')
# Python 3 doesn't allow sorting of mixed types so we can't just
# use sorted() on the metadata keys. Sort first by type then sort
# by value within each type.
for key in self._sorted_keys_grouped_by_type(self._seq.metadata):
value = self._seq.metadata[key]
lines.add_lines(self._format_metadata_key_value(key, value))
if self._seq.has_positional_metadata():
lines.add_line('Positional metadata:')
for key in self._seq.positional_metadata.columns.values.tolist():
dtype = self._seq.positional_metadata[key].dtype
lines.add_lines(
self._format_positional_metadata_column(key, dtype))
lines.add_line('Stats:')
for label, value in self._seq._repr_stats():
lines.add_line('%s%s: %s' % (self._indent, label, value))
lines.add_separator()
num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
# display entire sequence if we can, else display the first two and
# last two lines separated by ellipsis
if num_lines <= 5:
lines.add_lines(self._format_chunked_seq(
range(num_lines), num_chars, column_width))
else:
lines.add_lines(self._format_chunked_seq(
range(2), num_chars, column_width))
lines.add_line('...')
lines.add_lines(self._format_chunked_seq(
range(num_lines - 2, num_lines), num_chars, column_width))
return lines.to_str()
def _sorted_keys_grouped_by_type(self, dict_):
"""Group keys within a dict by their type and sort within type."""
type_sorted = sorted(dict_, key=self._type_sort_key)
type_and_value_sorted = []
for _, group in itertools.groupby(type_sorted, self._type_sort_key):
type_and_value_sorted.extend(sorted(group))
return type_and_value_sorted
def _type_sort_key(self, key):
return repr(type(key))
def _format_metadata_key_value(self, key, value):
"""Format metadata key:value, wrapping across lines if necessary."""
key_fmt = self._format_key(key)
supported_type = True
if isinstance(value, (six.text_type, six.binary_type)):
# for stringy values, there may be u'' or b'' depending on the type
# of `value` and version of Python. find the starting quote
# character so that wrapped text will line up with that instead of
# the string literal prefix character. for example:
#
# 'foo': u'abc def ghi
# jkl mno'
value_repr = repr(value)
extra_indent = 1
if not (value_repr.startswith("'") or value_repr.startswith('"')):
extra_indent = 2
        # handles any number, including bool
elif value is None or isinstance(value, numbers.Number):
value_repr = repr(value)
extra_indent = 0
else:
supported_type = False
if not supported_type or len(value_repr) > 140:
value_repr = str(type(value))
# extra indent of 1 so that wrapped text lines up past the bracket:
#
# 'foo': <type
# 'dict'>
extra_indent = 1
return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
def _format_key(self, key):
"""Format metadata key.
Includes initial indent and trailing colon and space:
<indent>'foo':<space>
"""
key_fmt = self._indent + repr(key)
supported_types = (six.text_type, six.binary_type, numbers.Number,
type(None))
if len(key_fmt) > (self._width / 2) or not isinstance(key,
supported_types):
key_fmt = self._indent + str(type(key))
return '%s: ' % key_fmt
def _wrap_text_with_indent(self, text, initial_text, extra_indent):
"""Wrap text across lines with an initial indentation.
For example:
'foo': 'abc def
ghi jkl
mno pqr'
<indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
lines are indented such that they line up with the start of the
previous line of wrapped text.
"""
return textwrap.wrap(
text, width=self._width, expand_tabs=False,
initial_indent=initial_text,
subsequent_indent=' ' * (len(initial_text) + extra_indent))
def _format_positional_metadata_column(self, key, dtype):
key_fmt = self._format_key(key)
dtype_fmt = '<dtype: %s>' % str(dtype)
return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
def _find_optimal_seq_chunking(self):
"""Find the optimal number of sequence chunks to fit on a single line.
Returns the number of lines the sequence will occupy, the number of
sequence characters displayed on each line, and the column width
necessary to display position info using the optimal number of sequence
chunks.
"""
# strategy: use an iterative approach to find the optimal number of
# sequence chunks per line. start with a single chunk and increase
# until the max line width is exceeded. when this happens, the previous
# number of chunks is optimal
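        # Worked example (consistent with the 400-character repr doctest
        # above): with width=71 and chunk_size=10, six chunks give a line
        # length of 4 + 60 + 5 = 69 <= 71, while seven chunks give
        # 4 + 70 + 6 = 80 > 71, so each line displays 60 characters.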
num_lines = 0
num_chars = 0
column_width = 0
num_chunks = 1
not_exceeded = True
while not_exceeded:
line_len, new_chunk_info = self._compute_chunked_seq_line_len(
num_chunks)
not_exceeded = line_len <= self._width
if not_exceeded:
num_lines, num_chars, column_width = new_chunk_info
num_chunks += 1
return num_lines, num_chars, column_width
def _compute_chunked_seq_line_len(self, num_chunks):
"""Compute line length based on a number of chunks."""
num_chars = num_chunks * self._chunk_size
# ceil to account for partial line
num_lines = int(math.ceil(len(self._seq) / num_chars))
# position column width is fixed width, based on the number of
# characters necessary to display the position of the final line (all
# previous positions will be left justified using this width)
column_width = len('%d ' % ((num_lines - 1) * num_chars))
# column width + number of sequence characters + spaces between chunks
line_len = column_width + num_chars + (num_chunks - 1)
return line_len, (num_lines, num_chars, column_width)
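    # Illustrative note (added; not part of the original module): with a
    # chunk size of 10, num_chunks=2 gives num_chars=20. For a 100-character
    # sequence that is 5 lines, the last line starts at position 80, so
    # column_width is len('80 ') == 3 and line_len == 3 + 20 + (2 - 1) == 24.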
def _format_chunked_seq(self, line_idxs, num_chars, column_width):
"""Format specified lines of chunked sequence data."""
lines = []
for line_idx in line_idxs:
seq_idx = line_idx * num_chars
chars = str(self._seq[seq_idx:seq_idx+num_chars])
chunked_chars = chunk_str(chars, self._chunk_size, ' ')
lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
return lines
| bsd-3-clause |
Pinafore/qb | qanta/hyperparam.py | 2 | 1848 | import copy
import json
import yaml
from sklearn.model_selection import ParameterGrid
def expand_config(base_file, hyper_file, output_file):
"""
This is useful for taking the qanta.yaml config, a set of values to try for different hyper parameters, and
generating a configuration representing each value in the parameter sweep
"""
with open(base_file) as f:
base_conf = yaml.load(f)
with open(hyper_file) as f:
hyper_conf = yaml.load(f)
all_base_guessers = base_conf["guessers"]
final_guessers = {}
for guesser, params in hyper_conf["parameters"].items():
base_guesser_conf = all_base_guessers[guesser]
if len(base_guesser_conf) != 1:
raise ValueError(
"More than one configuration for parameter tuning base is invalid"
)
base_guesser_conf = base_guesser_conf[0]
parameter_set = set(base_guesser_conf.keys()) | set(params.keys())
param_grid = {}
for p in parameter_set:
if p in params:
param_grid[p] = params[p]
else:
param_grid[p] = [base_guesser_conf[p]]
parameter_list = list(ParameterGrid(param_grid))
final_guessers[guesser] = parameter_list
final_conf = copy.deepcopy(base_conf)
for g in final_conf["guessers"]:
if g in final_guessers:
final_conf["guessers"][g] = copy.deepcopy(final_guessers[g])
# There is a bug in yaml.dump that doesn't handle outputting nested dicts/arrays correctly. I didn't want to debug
# So instead output to json then convert that to yaml
with open("/tmp/qanta-tmp.json", "w") as f:
json.dump(final_conf, f)
with open("/tmp/qanta-tmp.json") as f:
conf = json.load(f)
with open(output_file, "w") as f:
yaml.dump(conf, f)
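# Illustrative sketch (added; not part of the original module): how
# ParameterGrid expands a small hyperparameter specification. The parameter
# names below are made up purely for demonstration.
if __name__ == "__main__":
    _demo_grid = {"lr": [0.1, 0.01], "dropout": [0.5]}
    # One dict per combination, e.g. {'dropout': 0.5, 'lr': 0.1} and
    # {'dropout': 0.5, 'lr': 0.01}
    print(list(ParameterGrid(_demo_grid)))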
| mit |
gtesei/fast-furious | dataset/images2/serializerDogsCats_test.py | 1 | 1050 | import mahotas as mh
from sklearn import cross_validation
from sklearn.linear_model.logistic import LogisticRegression
import numpy as np
from glob import glob
from edginess import edginess_sobel
basedir = 'test_dogs_vs_cats'
def features_for(im):
im = mh.imread(im,as_grey=True).astype(np.uint8)
#return mh.features.haralick(im).mean(0)
return np.squeeze(mh.features.haralick(im)).reshape(-1)
features = []
sobels = []
images = glob('{}/*.jpg'.format(basedir))
ims = len(images)
for i in range(1,ims+1):
im = 'test_dogs_vs_cats/'+str(i)+'.jpg'
print('processing ' +str(im) +' ...')
#for im in images:
features.append(features_for(im))
sobels.append(edginess_sobel(mh.imread(im, as_grey=True)))
features = np.array(features)
n = features.shape
print('features='+str(n))
print(str(features))
features_sobels = np.hstack([np.atleast_2d(sobels).T,features])
np.savetxt("test_featuresDogsCatsE.zat", features, delimiter=",")
np.savetxt("test_featuresDogsCats_sobelsE.zat", features_sobels, delimiter=",")
| mit |
public-ink/public-ink | server/appengine/lib/matplotlib/sphinxext/mathmpl.py | 12 | 3822 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
from hashlib import md5
from docutils import nodes
from docutils.parsers.rst import directives
import warnings
from matplotlib import rcParams
from matplotlib.mathtext import MathTextParser
rcParams['mathtext.fontset'] = 'cm'
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, ['cm', 'stix', 'stixsans'])
options_spec = {'fontset': fontset_choice}
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = options_spec
def math_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
latex = ''.join(content)
node = latex_math(block_text)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
orig_fontset = rcParams['mathtext.fontset']
rcParams['mathtext.fontset'] = fontset
if os.path.exists(filename):
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
except:
warnings.warn("Could not render math expression %s" % latex,
Warning)
depth = 0
rcParams['mathtext.fontset'] = orig_fontset
sys.stdout.write("#")
sys.stdout.flush()
return depth
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
name = 'math-%s' % md5(latex.encode()).hexdigest()[-10:]
destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
if not os.path.exists(destdir):
os.makedirs(destdir)
dest = os.path.join(destdir, '%s.png' % name)
path = '/'.join((setup.app.builder.imgpath, 'mathmpl'))
depth = latex2png(latex, dest, node['fontset'])
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
def setup(app):
setup.app = app
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math,
html=(visit_latex_math_html, depart_latex_math_html),
latex=(visit_latex_math_latex, depart_latex_math_latex))
app.add_role('math', math_role)
app.add_directive('math', math_directive,
True, (0, 0, 0), **options_spec)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
| gpl-3.0 |
smsolivier/VEF | code/mms.py | 1 | 2745 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import ld as LD
import dd as DD
from scipy.interpolate import interp1d
from hidespines import *
from R2 import *
import sys
''' Test order of accuracy for LD options '''
if (len(sys.argv) > 1):
outfile = sys.argv[1]
else:
outfile = None
def getOrder(sol, N, tol, label):
print('Method =', sol[0].name)
phi_mms = lambda x: np.sin(np.pi*x/xb) # exact solution
err = np.zeros(len(sol))
for i in range(len(sol)):
sol[i].setMMS()
# make video
# x, phi, it = sol[i].sourceIteration(tol, PLOT='phi' + str(N[i]))
x, phi, it = sol[i].sourceIteration(tol, 1000)
phi_int = interp1d(x, phi)
# err[i] = np.fabs(phi_mms(xb/2) - phi_int(xb/2))/phi_mms(xb/2)
err[i] = np.linalg.norm(phi_mms(x) - phi, 2)/np.linalg.norm(phi_mms(x), 2)
# plt.plot(x, phi, '-o')
# plt.show()
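	# added note (not in the original script): fit log(err) against log(1/N);
	# the slope fit[0] is the observed order of accuracy p in err ~ C*h**p,
	# and exp(fit[1]) is the error constant C.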
fit = np.polyfit(np.log(1/N), np.log(err), 1)
# fit equation
f = lambda x: np.exp(fit[1]) * x**(fit[0])
# R^2 value
r2 = rsquared(err, f(1/N))
print(fit[0], np.exp(fit[1]), r2)
plt.loglog(xb/N, err, '-o', clip_on=False, label=label)
return err
# N = np.array([80, 160, 320, 640, 1280])
N = np.logspace(1.2, 2.5, 3)
N = np.array([int(x) for x in N])
n = 8
Sigmaa = lambda x: .1
Sigmat = lambda x: 1
q = lambda x, mu: 1
xb = 5
tol = 1e-10
# make solver objects
ed = [LD.LD(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat,q) for x in N]
s2 = [LD.S2SA(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q) for x in N]
ed00 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=0) for x in N]
ed01 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=1) for x in N]
ed10 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=0) for x in N]
ed11 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=1) for x in N]
ed20 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat,q, OPT=2, GAUSS=0) for x in N]
ed21 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=1) for x in N]
# get order of accuracy
# err = getOrder(ed, N, tol, 'LD')
# err00 = getOrder(ed00, N, tol, 'MHFEM Edges, No Gauss')
# err01 = getOrder(ed01, N, tol, 'Maintain Slopes, No Gauss')
# err10 = getOrder(ed10, N, tol, 'MHFEM Edges, Gauss')
# err11 = getOrder(ed11, N, tol, 'Maintain Slopes, Gauss')
# err20 = getOrder(ed20, N, tol, 'vanLeer, No Gauss')
# err21 = getOrder(ed21, N, tol, 'vanLeer, Gauss')
err = getOrder(s2, N, tol, 'S2SA')
plt.legend(loc='best', frameon=False)
plt.xlabel(r'$h$', fontsize=20)
plt.ylabel('Error', fontsize=20)
hidespines(plt.gca())
if (outfile != None):
plt.savefig(outfile, transparent=True)
else:
plt.show() | mit |
mmottahedi/neuralnilm_prototype | scripts/e300.py | 2 | 5276 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.0,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-04,
learning_rate_changes_by_iteration={
100: 5e-05,
500: 1e-05,
2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter,
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
}
],
do_save_activations=False
)
def exp_a(name):
# 3 appliances
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'].extend([
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
])
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=4000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
yashsavani/rechorder | motif.py | 1 | 1780 | #!/usr/bin/python
import util
import chordKMeans
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab
import random
beatsPerBarDefault = 4
kMeansDefault = 7
def generateAndWriteCentroids(midiFiles, numCentroids=kMeansDefault, beatsPerBar = beatsPerBarDefault, fileName = None):
featureCentroids, centroidPoints = chordKMeans.getFeatureCentroids(midiFiles, numCentroids=numCentroids, beatsPerBar=beatsPerBar)
if not fileName:
# make a random name
fileName = ''.join([random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(10)]) + '.mtf'
print 'printing to', fileName
with open(fileName, 'w') as f:
for arr in featureCentroids:
f.write(' '.join(map(str, arr.tolist())))
f.write('\n')
return fileName
def readCentroids(fileName):
with open(fileName, 'r') as f:
mat = []
for l in f:
arr = []
for x in l.split():
arr.append(float(x))
mat.append(arr)
return np.matrix(mat)
# catch exception?
if __name__ == "__main__":
if len(sys.argv) <= 2:
print "Please give filename and the midi files."
sys.exit(1)
else:
filename = sys.argv[1]+".mtf"
midiFiles = sys.argv[2:]
num_reps = 5
centers = [chordKMeans.getFeatureCentroids(midiFiles, numCentroids=kMeansDefault, beatsPerBar=beatsPerBarDefault) for _ in range(num_reps)]
results = [chordKMeans.evaluateKmeansClusters(midiFiles, centroids, corr_centers) \
for (centroids, corr_centers) in centers]
enum = [(e,z) for e,z in enumerate(results)]
featureCentroids = centers[max(enum, key=lambda x: x[1])[0]][0]
print 'writing to', filename
with open(filename, 'w') as f:
for arr in featureCentroids:
f.write(' '.join(map(str, arr.tolist())))
f.write('\n')
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/metrics/cluster/supervised.py | 7 | 31476 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# Gregory Stupp <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy.misc import comb
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
from ...utils.validation import check_array
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
        If True, return a sparse CSR contingency matrix. If ``eps is not None``,
and ``sparse is True``, will throw ValueError.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
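# Illustrative note (added; not part of the original module): for
# labels_true = [0, 0, 1, 1] and labels_pred = [0, 1, 1, 1] the contingency
# matrix is
#     [[1, 1],
#      [0, 2]]
# i.e. one true-class-0 sample fell in cluster 0, one in cluster 1, and both
# true-class-1 samples fell in cluster 1.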
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure : float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
    completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but un-necessary splits harms completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
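# Illustrative note (added; not part of the original module): the MI is
# returned in nats (natural log). For two identical labelings it equals the
# entropy of the labeling, e.g. mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
# is log(2) ~= 0.693.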
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper limited by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positive** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negative** (i.e. the number of pairs of points that belong to the
    same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
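# Illustrative note (added; not part of the original module): entropy uses
# natural logarithms, so entropy([0, 0, 1, 1]) == log(2) ~= 0.693, while a
# labeling with a single class has entropy 0.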
| mit |
zooniverse/aggregation | experimental/serengeti/blankImage.py | 1 | 5730 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import urllib
import matplotlib.pyplot as plt
import cv2
from cassandra.cluster import Cluster
import cassandra
from cassandra.concurrent import execute_concurrent
import psycopg2
import psycopg2
from scipy.stats import chi2
import math
db_name = "condor"
date = "_2014-11-23"
client = pymongo.MongoClient()
db = client[db_name+date]
subjects = db[db_name+"_subjects"]
classifications = db[db_name+"_classifications"]
conn = psycopg2.connect("dbname='"+db_name+"' user='greg' host='localhost' password='apassword'")
cur = conn.cursor()
# cur.execute("drop table serengeti")
cur.execute("create table "+db_name+"(user_name text,created_at timestamp)")#, PRIMARY KEY (user_name,created_at))")
cur.execute("create index "+db_name+"1 on "+db_name+" (user_name)")
cur.execute("create index "+db_name+"2 on "+db_name+" (user_name,created_at)")
for i,c in enumerate(classifications.find().limit(15000000)):
if "user_name" in c:
user_name = c["user_name"]
assert isinstance(user_name,unicode)
# print user_name
user_name = user_name.replace("'","")
cur.execute("insert into "+db_name+" (user_name,created_at) values ('"+user_name+"','"+str(c["created_at"])+"')")
if i% 1000 == 0:
print i
conn.commit()
# connect to the mongodb server
cur.execute("select distinct user_name from " + db_name)
users = [c[0] for c in cur.fetchall()]
all_f = []
percentiles = [0.01,0.02,0.03,0.04]
percentiles.extend(np.arange(0.05,1,0.05))
percentiles.extend([0.96,0.97,0.98,0.99])
A = {p:[] for p in percentiles}
B = {p:[] for p in percentiles}
C = {p:[] for p in percentiles}
pp = 0.6
percentile_hist = []
lengths = []
lengths_2 = []
X = []
Y = []
for ii,u in enumerate(users):
# print u
cur.execute("select created_at from " +db_name + " where user_name = '" + u + "'")
timestamps = [c[0] for c in cur.fetchall()]
previous_index = 0
session_lengths = []
time_out = []
for t_index in range(1,len(timestamps)):
delta_t = timestamps[t_index]-timestamps[t_index-1]
if delta_t.seconds == 0:
continue
if delta_t.seconds > 60*15:
session_lengths.append(t_index-previous_index)
previous_index = t_index
session_lengths.append(len(timestamps)-previous_index)
if (len(session_lengths) < 5):# or (len(session_lengths) >= 40):
continue
# p = np.percentile(np.asarray(session_lengths),50)
# session_lengths = [s-p for s in session_lengths if s > p]
# session_lengths.sort()
# # print len(session_lengths)
# mean = np.mean(session_lengths)
# t = [s/mean for s in session_lengths if s/mean < 20]
# # n,bins,patches = plt.hist(t, 25, normed=1, facecolor='green', alpha=0.5)
# n,bins = np.histogram(t,40)
# # plt.close()
# n_bins = zip(n,bins)
# threshold = max(n_bins,key = lambda x:x[0])[1]
#
#
# # if ii >= 300:
# # assert False
#
# mean = np.mean(session_lengths)
# session_lengths = [s-mean*threshold for s in session_lengths if s > mean*threshold]
# if mean > 80:
# print u
c = float(max(session_lengths))
a = float(min(session_lengths))
# normalized_s = [(s-min_s)/(max_s-min_s) for s in session_lengths]
# print normalized_s
x_bar = (np.mean(session_lengths)-a)/(c-a)
v_bar = np.var(session_lengths,ddof=1)/((c-a)**2)
# if v_bar >= (x_bar*(1-x_bar)):
# print "skipping"
# continue
#
# alpha = x_bar*(x_bar*(1-x_bar)/v_bar-1)
# beta = (1-x_bar)*(x_bar*(1-x_bar)/v_bar-1)
#
# from scipy.stats import beta as beta_func
#
#
#
# if min(alpha,beta) > 1:
# mode = (alpha-1)/(alpha+beta-2) * (c-a) + a
#
# session_lengths = [s for s in session_lengths if s >= mode]
#
# # r = beta_func.rvs(alpha, beta)
# # fig, ax = plt.subplots(1, 1)
# # x = np.linspace(beta_func.ppf(0.01, alpha, beta),beta_func.ppf(0.99, alpha, beta), 100)
# # rv = beta_func(alpha, beta)
# # ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# # plt.show()
# #
# # plt.hist(session_lengths,10)
# # plt.plot([mode,mode],[0,10])
# # plt.show()
#
mean = np.mean(session_lengths)
lengths.extend(session_lengths)
lengths_2.append(np.mean(session_lengths))
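    # Added note (not in the original script): assuming session lengths are
    # exponentially distributed with mean mu, the pivot 2*n*x_bar/mu follows
    # a chi-square distribution with 2*n degrees of freedom, so the lines
    # below give an exact 95% confidence interval [lb, ub] for the mean, and
    # -log(1-p)*mu is the p-th quantile of the fitted exponential.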
num_samples = len(session_lengths)
ub = (2*num_samples*mean)/chi2.ppf(0.025, 2*num_samples)
lb = (2*num_samples*mean)/chi2.ppf(1-0.025, 2*num_samples)
# print lb,mean,ub
for pp in percentiles:
l_median = -math.log(1-pp)*lb
median = -math.log(1-pp)*mean
u_median = -math.log(1-pp)*ub
A[pp].append(len([1. for s in session_lengths if s <= l_median])/float(num_samples))
B[pp].append(len([1. for s in session_lengths if s <= median])/float(num_samples))
C[pp].append(len([1. for s in session_lengths if s <= u_median])/float(num_samples))
print len(A[percentiles[0]])
# print np.mean(A)
# print np.mean(B)
# print np.mean(C)
# lower:
m = [np.mean(A[pp]) for pp in percentiles]
plt.plot(percentiles,m)
m = [np.mean(B[pp]) for pp in percentiles]
plt.plot(percentiles,m)
m = [np.mean(C[pp]) for pp in percentiles]
plt.plot(percentiles,m)
plt.plot([0,1],[0,1],"--",color="black")
plt.show()
print np.mean(lengths),np.median(lengths),np.percentile(lengths,90),np.percentile(lengths,99)
print np.percentile(lengths_2,1),np.mean(lengths_2),np.median(lengths_2),np.percentile(lengths_2,90),np.percentile(lengths_2,99)
plt.hist(lengths_2, 50, normed=1, facecolor='green')
plt.show() | apache-2.0 |
MartinDelzant/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# a different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
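# Illustrative sketch (not part of the original tests): given Platt
# coefficients A and B, such as the LibSVM values checked above, decision
# values map to probabilities as p = 1 / (1 + exp(A * f + B)). The helper
# name below is hypothetical and is not called by any test.
def _platt_probability_sketch(decision_values, A, B):
    import numpy as np  # local import keeps the sketch self-contained
    f = np.asarray(decision_values, dtype=float)
    return 1. / (1. + np.exp(A * f + B))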
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
AlexRobson/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
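    # brute-force reference implementation: evaluate the kernel density of
    # the training points X at every query point in Y directly from the
    # pairwise distances, for comparison with KernelDensity below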
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
mgaitan/scipy | scipy/spatial/_plotutils.py | 53 | 4034 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
was_held = ax.ishold()
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
ptp_bound = points.ptp(axis=0)
ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0],
points[:,0].max() + 0.1*ptp_bound[0])
ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1],
points[:,1].max() + 0.1*ptp_bound[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
ax.plot(tri.points[:,0], tri.points[:,1], 'o')
ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
for simplex in hull.simplices:
ax.plot(hull.points[simplex,0], hull.points[simplex,1], 'k-')
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
"""
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
ax.plot(vor.points[:,0], vor.points[:,1], '.')
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
ax.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], 'k-')
ptp_bound = vor.points.ptp(axis=0)
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
ax.plot([vor.vertices[i,0], far_point[0]],
[vor.vertices[i,1], far_point[1]], 'k--')
_adjust_bounds(ax, vor.points)
return ax.figure
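# Illustrative usage sketch (not part of the original module): the helper
# below is hypothetical and never called here; it only shows how the three
# plotting functions above are typically driven.
def _example_plot_all(points):
    import matplotlib.pyplot as plt
    from scipy.spatial import Delaunay, ConvexHull, Voronoi
    delaunay_plot_2d(Delaunay(points))
    convex_hull_plot_2d(ConvexHull(points))
    voronoi_plot_2d(Voronoi(points))
    plt.show()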
| bsd-3-clause |
roshchupkin/hase | tools/mapper.py | 1 | 9070 | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import *
if PYTHON_PATH is not None:
for i in PYTHON_PATH: sys.path.insert(0,i)
import h5py
import pandas as pd
import numpy as np
import argparse
from hdgwas.tools import Reference, Mapper, Timer
from hdgwas.hash import *
import gc
if __name__=='__main__':
os.environ['HASEDIR']=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
parser = argparse.ArgumentParser(description='Script to map studies for meta-stage')
parser.add_argument("-g",required=True, type=str, help="path/paths to genotype data folder")
parser.add_argument('-study_name',type=str,required=True, default=None, help=' Study names')
parser.add_argument("-o", "--out", type=str,required=True, help="path to save result folder")
parser.add_argument('-ref_name', type=str, default='1000Gp1v3_ref', help='Reference panel name')
parser.add_argument('-mismatch_table',action='store_true',default=False, help='Save table with mismatch IDs')
parser.add_argument('-flipped_table',action='store_true',default=False, help='Save table with mismatch IDs')
parser.add_argument('-probe_chunk',type=int,default=10000, help='Probes chunk')
parser.add_argument('-ref_chunk',type=int,default=10000, help='Reference chunk')
parser.add_argument('-chunk',type=int,default=2000000, help='Chunk size')
args = parser.parse_args()
print args
try:
print ('Creating directories...')
os.mkdir(args.out)
except:
print('Directory {} is already exist!'.format(args.out))
probes=pd.HDFStore(os.path.join(args.g,'probes', args.study_name+'.h5'),'r')
probes_n_rows=probes.get_storer('probes').nrows
chunk_size = np.min([args.chunk,probes_n_rows])
print ('Merge chunk size {}'.format(chunk_size))
match_key=np.array([],dtype=np.int32)
match_index=np.array([],dtype=np.int32)
flip_key=np.array([],dtype=np.int32)
flip_index=np.array([],dtype=np.int32)
ID=np.array([])
del_counter_ref={}
ID2CHR=False
IDconv=False
hashing=False
merge_on={
'ID':{
'straight':["ID",'allele1','allele2'],
'reverse':["ID",'allele2','allele1']
},
'CHR':{
'straight':["CHR",'bp','allele1','allele2'],
'reverse':["CHR",'bp','allele2','allele1']
}
}
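    # 'straight' matches study and reference variants with alleles in the
    # same order; 'reverse' matches the same variant with allele1/allele2
    # swapped, which is later recorded as a flipped variant (flip = -1)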
for p in xrange(int(np.ceil(probes_n_rows / float(chunk_size)))):
print 'p',p
p_start_i = p * chunk_size
p_stop_i = min((p + 1) * chunk_size, probes_n_rows)
a = probes.select('probes', start = p_start_i, stop = p_stop_i)
if p==0:
print a.head()
if issubclass(type(a.iloc[0]['allele1']), np.str):
hashing=True
if "CHR" in a.columns and 'bp' in a.columns:
ID2CHR=True
merge=merge_on['CHR']
print ('Merge on CHR/bp')
else:
if ':' in a.ID.iloc[0] and ':' in a.ID.iloc[1]:
CHR=[]
bp=[]
for i in a.ID:
s=i.split(":")
CHR.append(s[0])
bp.append(s[1])
CHR=np.array(CHR,dtype=np.int8)
bp=np.array(bp)
if np.max(CHR)<23 and np.min(CHR)>0:
a['CHR']=CHR
a['bp']=bp
a.CHR = a.CHR.astype(np.int64)
a.bp = a.bp.astype(np.int64)
ID2CHR=True
IDconv=True
merge=merge_on['CHR']
print ('Merge on CHR/bp from ID')
print a.head()
else:
print 'No CHR and bp info...'
merge=merge_on['ID']
print ('Merge on ID')
else:
print 'No CHR and bp info...'
merge=merge_on['ID']
print ('Merge on ID')
elif IDconv:
def f(x):
s=x.ID.split(':')
return s[0],s[1]
CHR_bp=a.apply(f, axis=1 )
a['CHR'],a['bp']=zip(*CHR_bp)
a.CHR=a.CHR.astype(np.int64)
a.bp= a.bp.astype(np.int64)
print a.head()
a['counter_prob']=np.arange(p_start_i,p_stop_i,dtype='int32')
reference=Reference()
reference.name=args.ref_name
reference.chunk=args.ref_chunk
reference.load()
counter_ref=0
if hashing:
print 'Hashing...'
a.allele1=a.allele1.apply(hash)
a.allele2=a.allele2.apply(hash)
for r,b in enumerate(reference.dataframe):
if r==0:
if np.sum(np.array([ 1 if i in reference.columns else 0 for i in b.columns.tolist() ]))!=len(reference.columns):
raise ValueError('Reference table should have {} columns'.format(reference.columns))
if r==0 and p==0:
print ('********************************')
print('Use {} as a reference panel'.format(args.ref_name))
print b.head()
print ('********************************')
print 'r',r
if p==0:
ID=np.append(ID,b.ID)
b['counter_ref']=np.arange(counter_ref,counter_ref+b.shape[0],dtype='int32')
counter_ref+=b.shape[0]
if len(match_index) or len(flip_index):
print 'matched {}'.format(match_index.shape[0])
print 'flipped {}'.format(flip_index.shape[0])
if del_counter_ref.get(r) is not None:
with Timer() as t:
b=b[~b.counter_ref.isin(del_counter_ref[r])]
print 'time {}'.format(t.secs)
match_df = pd.merge(b,a, left_on=merge['straight'], right_on=merge['straight'])
flip_df=pd.merge(b[~b.counter_ref.isin(match_df.counter_ref)],a, left_on=merge['reverse'], right_on=merge['straight'])
if len(match_df):
match_key=np.append(match_key,match_df.counter_ref)
match_index=np.append(match_index,match_df.counter_prob)
if del_counter_ref.get(r) is None:
del_counter_ref[r]=match_key
else:
del_counter_ref[r]=np.append(del_counter_ref[r], match_key)
if len(flip_df):
flip_key=np.append(flip_key,flip_df.counter_ref)
flip_index=np.append(flip_index,flip_df.counter_prob)
if del_counter_ref.get(r) is None:
del_counter_ref[r]=flip_key
else:
del_counter_ref[r]=np.append(del_counter_ref[r], flip_key)
gc.collect()
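    # index maps every reference-panel variant to its row in the study
    # probes (-1 = not found); flip marks probes whose alleles are stored
    # in reverse order relative to the reference panel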
index=np.ones(ID.shape[0],dtype='int')*-1
flip=np.ones(probes_n_rows,dtype='int')
index[match_key]=match_index
index[flip_key]=flip_index
flip[flip_index]=-1
print ('Saving results for {} to {} ...'.format(args.study_name,args.out))
np.save(os.path.join(args.out,'values_'+reference.name+'_'+args.study_name+'.npy'),index)
np.save(os.path.join(args.out,'flip_'+reference.name+'_'+args.study_name+'.npy'),flip)
np.save(os.path.join(args.out,'keys_'+reference.name+'.npy'),ID)
print ('Data successfully saved')
mismatch_index=np.setdiff1d(np.arange(probes_n_rows),np.append(match_index,flip_index) )
if os.path.isfile(os.path.join(args.g,'probes', args.study_name+'_hash_table.csv.gz')):
try:
df_hash=pd.read_csv(os.path.join(args.g,'probes', args.study_name+'_hash_table.csv.gz'),sep='\t', compression='gzip', index_col=False)
except:
df_hash=pd.read_csv(os.path.join(args.g,'probes', args.study_name+'_hash_table.csv.gz'),sep='\t', index_col=False)
else:
df_hash=None
print ('You do not have hash_table for alleles in your probes folder! '
           'You used an old version of HASE to convert your genotype data. '
           'To see the original allele codes you can make a hash_table using the script '
'{}/tools/tools.py -hash -g "original genotype folder" '.format(os.environ['HASEDIR']))
print 'There are {} common variances with reference panel, which will be included in study'.format(np.where(index!=-1)[0].shape[0] )
print 'There are {} variances from reference panel, which were not found in probes'.format(np.where(index==-1)[0].shape[0] )
print 'There are {} variances excluded from study (not found in reference panel)'.format( probes_n_rows-np.where(index!=-1)[0].shape[0] )
if args.mismatch_table and mismatch_index.shape[0]!=0:
df_mismatch=probes.select('probes',where=mismatch_index)
if df_hash is not None and not hashing:
df_mismatch=pd.merge(df_hash,df_mismatch,left_on='keys', right_on='allele1')
df_mismatch['str_allele1']=df_mismatch['allele']
del df_mismatch['allele']
df_mismatch=pd.merge(df_hash,df_mismatch,left_on='keys', right_on='allele2')
df_mismatch['str_allele2']=df_mismatch['allele']
del df_mismatch['allele']
del df_mismatch['keys_x']
del df_mismatch['keys_y']
df_mismatch.to_csv(os.path.join(args.out,'mismatch_ID_info.csv'))
print 'Mismatch ID info saved to {}'.format(os.path.join(args.out,args.study_name+'_mismatch_ID_info.csv'))
elif mismatch_index.shape[0]!=0:
print ('Mismatch examples:')
print probes.select('probes',where=mismatch_index[:10])
print 'There are {} flipped variances'.format(len(flip_index))
if args.flipped_table and flip_index.shape[0]!=0:
df_flipped=probes.select('probes',where=flip_index)
if df_hash is not None and not hashing:
df_flipped=pd.merge(df_hash,df_flipped,left_on='keys', right_on='allele1')
df_flipped['str_allele1']=df_flipped['allele']
del df_flipped['allele']
df_flipped=pd.merge(df_hash,df_flipped,left_on='keys', right_on='allele2')
df_flipped['str_allele2']=df_flipped['allele']
del df_flipped['allele']
del df_flipped['keys_x']
del df_flipped['keys_y']
df_flipped.to_csv(os.path.join(args.out,'flipped_ID_info.csv'))
print 'Flipped ID info saved to {}'.format(os.path.join(args.out,args.study_name + '_flipped_ID_info.csv'))
elif flip_index.shape[0]!=0:
print ('Flipped examples:')
print probes.select('probes',where=flip_index[:10])
| gpl-3.0 |
EarToEarOak/RTLSDR-Scanner | rtlsdr_scanner/spectrum.py | 1 | 9869 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from collections import OrderedDict
from decimal import Decimal
from operator import itemgetter, mul
from matplotlib.dates import seconds
import numpy
from rtlsdr_scanner.constants import WINFUNC
from rtlsdr_scanner.misc import db_to_level, level_to_db
from rtlsdr_scanner.utils_mpl import utc_to_mpl
class Extent(object):
def __init__(self, spectrum):
self.__clear()
self.__calc_extent(spectrum)
def __clear(self):
self.fMin = float('inf')
self.fMax = float('-inf')
self.lMin = float('inf')
self.lMax = float('-inf')
self.tMin = float('inf')
self.tMax = float('-inf')
self.fPeak = None
self.lPeak = None
self.tPeak = None
def __calc_extent(self, spectrum):
self.tMin = min(spectrum)
self.tMax = max(spectrum)
for timeStamp in spectrum:
points = spectrum[timeStamp].items()
if len(points) > 0:
fMin = min(points, key=itemgetter(0))[0]
fMax = max(points, key=itemgetter(0))[0]
lMin = min(points, key=itemgetter(1))[1]
lMax = max(points, key=itemgetter(1))[1]
self.fMin = min(self.fMin, fMin)
self.fMax = max(self.fMax, fMax)
self.lMin = min(self.lMin, lMin)
if(lMax >= self.lMax):
self.lMax = lMax
self.fPeak, self.lPeak = max(points, key=lambda(_f, l): l)
self.tPeak = timeStamp
def get_f(self):
if self.fMin == self.fMax:
return self.fMin, self.fMax - 0.001
return self.fMin, self.fMax
def get_l(self):
if self.lMin == self.lMax:
return self.lMin, self.lMax + 0.001
return self.lMin, self.lMax
def get_t(self):
return utc_to_mpl(self.tMax), utc_to_mpl(self.tMin - 1)
def get_ft(self):
tExtent = self.get_t()
return [self.fMin, self.fMax, tExtent[0], tExtent[1]]
def get_peak_fl(self):
return self.fPeak, self.lPeak
def get_peak_flt(self):
return self.fPeak, self.lPeak, self.tPeak
class Measure(object):
MIN, MAX, AVG, GMEAN, HBW, OBW = range(6)
def __init__(self, spectrum, start, end):
self.isValid = False
self.minF = None
self.maxF = None
self.minP = None
self.maxP = None
self.avgP = None
self.gMeanP = None
self.flatness = None
self.hbw = None
self.obw = None
self.__calculate(spectrum, start, end)
def __calculate(self, spectrum, start, end):
sweep = slice_spectrum(spectrum, start, end)
if sweep is None or len(sweep) == 0:
return
self.minF = min(sweep)[0]
self.maxF = max(sweep)[0]
self.minP = min(sweep, key=lambda v: v[1])
self.maxP = max(sweep, key=lambda v: v[1])
powers = [Decimal(db_to_level(p[1])) for p in sweep]
length = len(powers)
avg = sum(powers, Decimal(0)) / length
self.avgP = level_to_db(avg)
product = reduce(mul, iter(powers))
gMean = product ** (Decimal(1.0) / length)
self.gMeanP = level_to_db(gMean)
self.flatness = gMean / avg
self.__calc_hbw(sweep)
self.__calc_obw(sweep)
self.isValid = True
def __calc_hbw(self, sweep):
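        # half-power bandwidth: find the lowest and highest frequencies at
        # which the sweep is within 3 dB of the peak level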
power = self.maxP[1] - 3
self.hbw = [None, None, power]
if power >= self.minP[1]:
for (f, p) in sweep:
if p >= power:
self.hbw[0] = f
break
for (f, p) in reversed(sweep):
if p >= power:
self.hbw[1] = f
break
def __calc_obw(self, sweep):
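        # occupied-bandwidth estimate: the threshold is 0.5% of the summed
        # level across the sweep, and the edges are the first and last
        # frequencies at or above that threshold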
self.obw = [None, None, None]
totalP = 0
for (_f, p) in sweep:
totalP += p
power = totalP * 0.005
self.obw[2] = power
if power >= self.minP[1]:
for (f, p) in sweep:
if p >= power:
self.obw[0] = f
break
for (f, p) in reversed(sweep):
if p >= power:
self.obw[1] = f
break
def is_valid(self):
return self.isValid
def get_f(self):
return self.minF, self.maxF
def get_min_p(self):
return self.minP
def get_max_p(self):
return self.maxP
def get_avg_p(self):
return self.avgP
def get_gmean_p(self):
return self.gMeanP
def get_flatness(self):
return self.flatness
def get_hpw(self):
return self.hbw
def get_obw(self):
return self.obw
def count_points(spectrum):
points = 0
for timeStamp in spectrum:
points += len(spectrum[timeStamp])
return points
def reduce_points(spectrum, limit):
total = count_points(spectrum)
if total < limit:
return spectrum
newSpectrum = OrderedDict()
ratio = float(total) / limit
for timeStamp in spectrum:
points = spectrum[timeStamp].items()
reduced = OrderedDict()
for i in xrange(int(len(points) / ratio)):
point = points[int(i * ratio):int((i + 1) * ratio)][0]
reduced[point[0]] = point[1]
newSpectrum[timeStamp] = reduced
return newSpectrum
def split_spectrum(spectrum):
freqs = spectrum.keys()
powers = map(spectrum.get, freqs)
return freqs, powers
def split_spectrum_sort(spectrum):
freqs = spectrum.keys()
freqs.sort()
powers = map(spectrum.get, freqs)
return freqs, powers
def slice_spectrum(spectrum, start, end):
if spectrum is None or start is None or end is None or len(spectrum) < 1:
return None
sweep = spectrum[max(spectrum)]
if len(sweep) == 0:
return None
if min(sweep) > start or max(sweep) < end:
length = len(spectrum)
if length > 1:
sweep = spectrum.values()[length - 2]
else:
return None
sweepTemp = {}
for f, p in sweep.iteritems():
if start <= f <= end:
sweepTemp[f] = p
return sorted(sweepTemp.items(), key=lambda t: t[0])
def create_mesh(spectrum, mplTime):
total = len(spectrum)
width = len(spectrum[min(spectrum)])
x = numpy.empty((width, total + 1)) * numpy.nan
y = numpy.empty((width, total + 1)) * numpy.nan
z = numpy.empty((width, total + 1)) * numpy.nan
j = 1
for ys in spectrum:
time = utc_to_mpl(ys) if mplTime else ys
xs, zs = split_spectrum(spectrum[ys])
for i in range(len(xs)):
x[i, j] = xs[i]
y[i, j] = time
z[i, j] = zs[i]
j += 1
x[:, 0] = x[:, 1]
if mplTime:
y[:, 0] = y[:, 1] - seconds(1)
else:
y[:, 0] = y[:, 1] - 1
z[:, 0] = z[:, 1]
return x, y, z
def sort_spectrum(spectrum):
newSpectrum = OrderedDict()
for timeStamp in sorted(spectrum):
newPoints = OrderedDict()
points = sorted(spectrum[timeStamp].items())
for point in points:
newPoints[point[0]] = point[1]
newSpectrum[timeStamp] = newPoints
return newSpectrum
def diff_spectrum(spectrum):
data = OrderedDict()
for timeStamp, sweep in spectrum.items():
diff = numpy.diff(sweep.values())
data[timeStamp] = OrderedDict(zip(sweep.keys(), diff))
return data
def delta_spectrum(spectrum):
data = OrderedDict()
if len(spectrum) > 1:
_t, baseline = spectrum.items()[0]
for timeStamp, sweep in spectrum.items()[1:]:
delta = [(freq, sweep[freq] - baseline.get(freq, 0))
for freq in sweep.keys()]
data[timeStamp] = OrderedDict(delta)
else:
data = spectrum
return data
def smooth_spectrum(spectrum, winFunc, ratio):
data = OrderedDict()
for timeStamp, sweep in spectrum.items():
if len(sweep):
data[timeStamp] = smooth_sweep(sweep, winFunc, ratio)
return data
def smooth_sweep(sweep, winFunc, ratio):
pos = WINFUNC[::2].index(winFunc)
function = WINFUNC[1::2][pos]
length = len(sweep) / ratio
if length < 3:
length = 3
window = function(length)
data = numpy.array([x[1] for x in sweep.items()])
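    # mirror the sweep about both end points before convolving so the
    # smoothing window does not roll off artificially at the edges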
series = numpy.r_[2 * data[0] - data[length - 1::-1],
data,
2 * data[-1] - data[-1:-length:-1]]
levels = numpy.convolve(window / window.sum(), series, mode='same')
smoothed = levels[length:-length + 1]
return OrderedDict(zip(sweep.keys(), smoothed))
def get_peaks(spectrum, threshold):
sweep = OrderedDict(spectrum[max(spectrum)])
for freq, level in sweep.items():
if level < threshold:
del sweep[freq]
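    # a peak is where the first difference changes sign from positive to
    # negative; the +1 offset maps back to positions in the thresholded sweep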
indices = (numpy.diff(numpy.sign(numpy.diff(sweep.values()))) < 0).nonzero()[0] + 1
return sweep, indices
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| gpl-3.0 |
YaleDHLab/image-segmentation | british_library_periodicals/segment_periodicals.py | 1 | 2617 | from skimage import filters, segmentation, io
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from scipy import ndimage
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import sys, os
image_file = sys.argv[1]
file_extension = image_file.split(".")[-1]
plots_to_show = []
if file_extension in ["jpg", "jpeg"]:
im = ndimage.imread(image_file)
elif file_extension in ["jp2"]:
im = io.imread(image_file, plugin='freeimage')
else:
print "your input file isn't jpg or jp2"
sys.exit()
############################
# X-Y axis pixel dilations #
############################
# plot the amount of white ink across the columns & rows
row_vals = list([sum(r) for r in im ])
col_vals = list([sum(c) for c in im.T])
if "col_sums" in plots_to_show:
plt.plot(col_vals)
plt.show()
if "row_sums" in plots_to_show:
plt.plot(row_vals)
plt.show()
#########################################
# Otsu method of boolean classification #
#########################################
val = filters.threshold_otsu(im)
mask = im < val
clean_border = segmentation.clear_border(mask)
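# pixels darker than the Otsu threshold are treated as foreground (ink);
# clear_border then removes any foreground regions touching the image border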
plt.imshow(clean_border, cmap='gray')
plt.show()
#######################
# Label image regions #
#######################
labeled = label(clean_border)
image_label_overlay = label2rgb(labeled, image=im)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(image_label_overlay)
#plt.show()
#########################################
# Draw bounding box around each article #
#########################################
# create array in which to store cropped articles
cropped_images = []
# define amount of padding to add to cropped image
pad = 20
for region_index, region in enumerate(regionprops(labeled)):
if region.area < 2000:
continue
# draw a rectangle around the segmented articles
# bbox describes: min_row, min_col, max_row, max_col
minr, minc, maxr, maxc = region.bbox
# use those bounding box coordinates to crop the image
cropped_images.append(im[minr-pad:maxr+pad, minc-pad:maxc+pad])
print "region", region_index, "bounding box:", minr, minc, maxr, maxc
rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
plt.show()
###############
# Crop images #
###############
out_dir = "segmented_articles/"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# can crop using: cropped = image_array[x1:x2,y1:y2]
for c, cropped_image in enumerate(cropped_images):
io.imsave( out_dir + str(c) + ".png", cropped_image)
| mit |
bikong2/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
    # check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
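    # h is the expected per-sample log-likelihood under an isotropic
    # Gaussian with sigma = 0.1 in p dimensions (i.e. minus its
    # differential entropy), which the probabilistic PCA score should reach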
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
yavuzovski/playground | machine learning/Udacity/ud120-projects/svm/svm_author_id.py | 1 | 1601 | #!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
    Use an SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
import sys
import collections
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn import svm
from sklearn.metrics import accuracy_score
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
# use 1% of the data for more speed: accuracy drops to 0.88 (from 0.98 with the full data)
# without C optimization. With C optimization the accuracy is 0.990898748578 on the full data
# and 0.892491467577 on 1% of the data.
# features_train = features_train[:len(features_train) / 100]
# labels_train = labels_train[:len(labels_train) / 100]
#########################################################
clf = svm.SVC(kernel='rbf', C=10000.0)
t0 = time()
clf.fit(features_train, labels_train)
print("training time: {0} s".format(round(time() - t0, 3)))
# predicts = [features_test[10], features_test[26], features_test[50]]
# pred = clf.predict(predicts)
pred = clf.predict(features_test)
print(sum(pred))  # predictions are 0 or 1, so the sum counts how many test emails are labelled 1 (Chris)
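# an equivalent per-class count could use the (otherwise unused) collections
# import, e.g. print(collections.Counter(pred))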
# print("accuracy: {0}".format(accuracy_score(pred, labels_test)))
#########################################################
| gpl-3.0 |
gsganden/pitcher-reports | app.py | 1 | 11057 | from flask import Flask, render_template, request, redirect
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import sqlalchemy
import seaborn as sns
from sklearn.mixture import GMM
from matplotlib.ticker import NullFormatter
import matplotlib.patches as mpatches
import os
import urlparse
app = Flask(__name__)
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
database = url.path,
user = url.username,
password = url.password,
host = url.hostname,
port = url.port
scheme = url.scheme
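# note: the trailing commas above make database, user, password and host
# one-element tuples, hence the [0] indexing when the engine URL is built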
engine = sqlalchemy.create_engine('%s://%s:%s@%s:%s/%s' %
(scheme, user[0], password[0], host[0],
port, database[0][1:]))
sns.set_context('notebook')
pitch_type_dict = dict(FA='fastball',
FF='four-seam fastball',
SI='sinker',
FC='fastball (cutter)',
SL='slider',
CH='changeup',
CU='curveball',
KC='knuckle-curve',
KN='knuckleball',
EP='eephus')
@app.route('/')
def main():
return redirect('/index')
@app.route('/index', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
else:
pitcher = request.form['pitcher']
season = int(request.form['season'])
try:
data, pitch_types = get_data(pitcher.lower(), season)
except:
return render_template('error.html')
return render_template('results.html',
repertoire_plot=plot_repertoire(data, pitch_types),
selection_plot=plot_selection(data, pitch_types),
location_plot=plot_location(data, pitch_types),
pitcher=pitcher.title(),
season=season)
def get_data(pitcher, season):
query = '''
SELECT *
FROM pitches_app
WHERE pitcher = '%s'
AND year = %s
''' % (pitcher, season)
data = pd.read_sql(query, engine)
pitch_types = sorted(list(data['pitch_type'].unique()))
# Make copy to avoid altering list while iterating over it
filtered_pitch_types = pitch_types[:]
# Get rid of very infrequent pitch types, which are mostly bad data
for pitch_type in pitch_types:
if float(data[data['pitch_type'] == pitch_type].shape[0])\
/ data.shape[0] < .02:
filtered_pitch_types.remove(pitch_type)
pitch_types = list(data[data['pitch_type'].isin(filtered_pitch_types)]
.groupby('pitch_type')
.agg({'start_speed': [np.size, np.mean]})
['start_speed']
.sort_values(by='mean')
.index)
return data, pitch_types
def plot_repertoire(data, pitch_types):
gaussians = []
gmms = []
for pitch_type in pitch_types:
gmm = GMM(covariance_type='full')
sub_data = data[data['pitch_type'] == pitch_type][['pfx_x',
'pfx_z',
'start_speed']]
gmm.fit(sub_data)
x = np.arange(-20, 20, 0.25)
y = np.arange(-20, 20, 0.25)
X, Y = np.meshgrid(x, y)
gmms.append(gmm)
gaussians\
.append(plt.mlab
.bivariate_normal(X,
Y,
sigmax=np.sqrt(gmm
._get_covars()
[0][0][0]),
sigmay=np.sqrt(gmm
._get_covars()
[0][1][1]),
sigmaxy=gmm._get_covars()[0][0][1],
mux=gmm.means_[0][0],
muy=gmm.means_[0][1]))
plt.figure(figsize=(8, 6))
plt.scatter(data['pfx_x'], data['pfx_z'], c=data['start_speed'],
alpha=.3, cmap='inferno', norm=Normalize(70, 100), s=10)
plt.xlim([-20, 20])
plt.ylim([-20, 20])
plt.yticks([-10, 0, 10])
plt.xticks([-10, 0, 10])
plt.ylabel('Vertical Break (Inches)')
plt.xlabel('Horizontal Break (Inches) ')
plt.colorbar().set_label('Velocity')
ax = plt.gca()
ax.text(.61,
.98,
''.join([pitch_type + ': ' + pitch_type_dict[pitch_type] + '\n'
for pitch_type in pitch_types])[:-1],
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes)\
.set_bbox(dict(color='w', alpha=0.3, edgecolor='k'))
for index in xrange(len(gaussians)):
plt.contour(X, Y, gaussians[index], 3, colors='k', alpha=.3)
ax.text(gmms[index].means_[0][0],
gmms[index].means_[0][1],
pitch_types[index],
ha='center',
va='center',
color='k',
size=10,
backgroundcolor='w')\
.set_bbox(dict(color='w', alpha=0.3, edgecolor='k'))
plt.tight_layout()
    # Make Matplotlib write to a BytesIO file object and return its
    # base64-encoded contents
from io import BytesIO
figfile = BytesIO()
plt.savefig(figfile, format='png')
figfile.seek(0) # rewind to beginning of file
import base64
figdata_png = base64.b64encode(figfile.getvalue())
return figdata_png
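# Illustrative refactoring sketch (not used by this app): the BytesIO /
# base64 steps repeated in the plot_* functions of this module could be
# factored into one hypothetical helper such as the one below.
def _fig_to_base64_sketch():
    from io import BytesIO
    import base64
    figfile = BytesIO()
    plt.savefig(figfile, format='png')  # serialise the current figure
    figfile.seek(0)  # rewind to the beginning of the buffer
    return base64.b64encode(figfile.getvalue())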
def plot_selection(data, pitch_types):
plt.figure(figsize=(10, 6))
for plot_num in range(1, 21):
plt.subplot(4, 5, plot_num)
num_strikes = ((plot_num - 1) // 5) - 1
num_balls = ((plot_num - 1) % 5) - 1
pitch_data = data.copy()
if num_balls > -1:
pitch_data = pitch_data[pitch_data['balls'] == num_balls]
if num_strikes > -1:
pitch_data = pitch_data[pitch_data['strikes'] == num_strikes]
num_pitches_to_righties = float(pitch_data[pitch_data['stand'] == 'R']
.shape[0])
num_pitches_to_lefties = float(pitch_data[pitch_data['stand'] == 'L']
.shape[0])
for index, pitch_type in enumerate(pitch_types):
filter_pitch_data = pitch_data[pitch_data['pitch_type'] ==
pitch_type]
try:
plt.scatter(index,
filter_pitch_data[filter_pitch_data['stand'] ==
'R']
.shape[0]/num_pitches_to_righties,
color='r',
alpha=.5)
except ZeroDivisionError:
pass
try:
plt.scatter(index,
filter_pitch_data[filter_pitch_data['stand'] ==
'L']
.shape[0]/num_pitches_to_lefties,
color='b',
alpha=.5)
except ZeroDivisionError:
pass
if plot_num > 15:
plt.gca().text(index, -.1, pitch_type, ha='center', fontsize=8)
plt.ylim([0, 1])
plt.xticks([])
if plot_num != 1:
plt.gca().yaxis.set_major_formatter(NullFormatter())
plt.subplot(4, 5, 1)
plt.title('Any Balls')
plt.ylabel('Any Strikes')
plt.subplot(4, 5, 2)
plt.title('0 Balls')
plt.subplot(4, 5, 3)
plt.title('1 Ball')
plt.subplot(4, 5, 4)
plt.title('2 Balls')
plt.subplot(4, 5, 5)
plt.title('3 Balls')
red_patch = mpatches.Patch(color='red', label='Righty batter')
blue_patch = mpatches.Patch(color='blue', label='Lefty batter')
plt.legend(handles=[red_patch, blue_patch])
plt.subplot(4, 5, 6)
plt.ylabel('0 Strikes')
plt.subplot(4, 5, 11)
plt.ylabel('1 Strike')
plt.subplot(4, 5, 16)
plt.ylabel('2 Strikes')
plt.tight_layout()
    # Make Matplotlib write to a BytesIO file object and return its
    # base64-encoded contents
from io import BytesIO
figfile = BytesIO()
plt.savefig(figfile, format='png')
figfile.seek(0) # rewind to beginning of file
import base64
figdata_png = base64.b64encode(figfile.getvalue())
return figdata_png
def plot_location(data, pitch_types):
plt.figure(figsize=(3, 2*len(pitch_types)))
righty_data = data[data['stand'] == 'R']
lefty_data = data[data['stand'] == 'L']
pitch_type_num = -1
for plot_num in range(1, 2 * len(pitch_types) + 1):
plot_index = plot_num - 1
if plot_index % 2 == 0: # new pitch type
pitch_type_num += 1
righty_pitch_data = righty_data[righty_data['pitch_type'] ==
pitch_types[pitch_type_num]]
lefty_pitch_data = lefty_data[lefty_data['pitch_type'] ==
pitch_types[pitch_type_num]]
plt.subplot(len(pitch_types), 2, plot_num)
plt.plot([-.7083, .7083, .7083, -.7083, -.7083],
[0, 0, 1, 1, 0]) # Strike zone
plt.ylim([-1.5, 2])
plt.xlim([-3, 3])
plt.xticks([])
plt.yticks([])
ax = plt.gca()
ax.yaxis.set_major_formatter(NullFormatter())
ax.xaxis.set_major_formatter(NullFormatter())
if plot_num % 2 == 1:
plt.scatter(righty_pitch_data['px'],
righty_pitch_data['pz'],
c='r', alpha=.2, s=10)
plt.ylabel(pitch_types[pitch_type_num], rotation=0)
ax.yaxis.labelpad = 15
else:
plt.scatter(lefty_pitch_data['px'],
lefty_pitch_data['pz'],
c='b', alpha=.2, s=10)
if plot_index == 0:
plt.title('Righty batters')
elif plot_index == 1:
plt.title('Lefty batters')
plt.tight_layout()
    # Make Matplotlib write to a BytesIO file object and return its
    # base64-encoded contents
from io import BytesIO
figfile = BytesIO()
plt.savefig(figfile, format='png')
figfile.seek(0) # rewind to beginning of file
import base64
figdata_png = base64.b64encode(figfile.getvalue())
return figdata_png
def get_results(results_file):
soup = BeautifulSoup(results_file, 'html.parser')
contents = ''
for item in soup.body.contents:
contents += '\n' + unicode(item)
return contents
if __name__ == '__main__':
app.run(port=33507, debug=False)
| mit |
jbkopecky/housebot | models/db_to_json.py | 1 | 4177 | import sqlite3
import pandas as pd
import hues
from collections import defaultdict
from tqdm import tqdm
import sklearn
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def reshape_tags(data, index=None, columns=None, values=None):
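    # pivot long-format (ID, tag_name, tag_value) rows into a wide frame
    # indexed by ID with one column per tag; numerical duplicates are spread
    # over extra "<tag> 2", "<tag> 3", ... columns, while non-numerical
    # duplicates are only reported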
if (index is None) or (columns is None) or (values is None):
return None
cols = [str(x) for x in data[columns].unique()]
inds = [int(x) for x in data[index].unique()]
num_duplicates = defaultdict(lambda: defaultdict(set))
other_duplicates = defaultdict(lambda: defaultdict(set))
df = pd.DataFrame(columns=cols, index=inds)
for i in tqdm(range(len(data))):
ind, col, val = data.iloc[i][[index, columns, values]]
ind = int(ind)
old_val = df.loc[ind, col]
if pd.isnull(old_val):
val = 1 if val == '' else val
df.loc[ind, col] = val
else:
if val == "" or df.loc[ind, col] == "":
df.loc[ind, col] = val if val != "" else df.loc[ind,col]
elif is_number(val) and is_number(old_val):
if abs(int(val)) == abs(int(old_val)) and abs(int(val)) == 1:
df.loc[ind, col] = '1'
else:
num_duplicates[col][ind].add(old_val)
num_duplicates[col][ind].add(val)
else:
other_duplicates[col][ind].add(old_val)
other_duplicates[col][ind].add(val)
hues.warning("[Warning] Duplicate Value ! [ %s, %s ] : %s vs. %s" % (ind, col, df.loc[ind, col], val))
if len(other_duplicates) > 0:
hues("Cannot reshape: Non numerical duplicates ! %s" % other_duplicates)
else:
for col in num_duplicates.keys():
max_duplicate = max([len(x) for x in num_duplicates[col].values()])
col_names = [col] + [col + " " + str(n+1) for n in range(max_duplicate)[1:]]
for ind in num_duplicates[col].keys():
for i,v in enumerate(sorted(num_duplicates[col][ind])):
df.loc[ind,col_names[i]] = v
return df
def reshape_price(data):
indexes = [int(x) for x in sorted(data['ID'].unique())]
df = pd.DataFrame(columns=['prix','last_seen'], index=indexes)
for i in tqdm(range(len(data))):
ID, time, prix = data.iloc[i]
ID = int(ID)
time0 = df.loc[ID]['last_seen']
if not pd.isnull(time0):
if int(time0) > int(time):
continue
df.loc[ID,'prix'] = int(prix.replace(" ",""))
df.loc[ID,'last_seen'] = int(time)
return df
if __name__ == "__main__":
hues.info("Connecting")
con = sqlite3.connect("./data/raw_data.db")
hues.info("Importing annonces ...")
annonces = pd.read_sql_query("SELECT * FROM annonce", con, index_col='ID')
annonces = annonces[['arrondissement', 'agency_phone']]
annonces.index = [int(x) for x in annonces.index]
hues.info("Importing description ...")
descriptions = pd.read_sql_query("SELECT * FROM description", con, index_col='ID')
descriptions.index = [int(x) for x in descriptions.index]
hues.info("Importing prices ...")
prices = pd.read_sql_query("SELECT * FROM prix", con)
prices = reshape_price(prices)
hues.info("Importing tags ...")
tags = pd.read_sql_query("SELECT * FROM tags", con)
tags = tags[tags['tag_name']=='surface_m2']
tags = reshape_tags(tags, 'ID', 'tag_name', 'tag_value')
hues.info("* Merging into one dataframe...")
con.close()
data = pd.concat([annonces, descriptions, prices, tags], axis=1)
data = data[['last_seen', 'prix', 'surface_m2', 'description', 'arrondissement']]
data = data.dropna()
data['prix_per_m2'] = data['prix'].astype('float') / data['surface_m2'].astype('float')
data['surface_m2'] = data['surface_m2'].astype('float')
data = data[['last_seen', 'arrondissement', 'surface_m2', 'prix_per_m2']]
data.columns = ['date', 'z', 'x', 'y']
for arr in list(set(data['z'].values)):
data[data['z']==arr].to_json('../housebot_viz/data/%s.json' % str(arr).replace(" ", "_"), orient='index')
| mit |
sovicak/AnonymniAnalytici | 2018_02_15_cryptocurrencies_trading/ta-lib/talib/test_abstract.py | 1 | 9717 | import numpy as np
from nose.tools import (
assert_equals,
assert_true,
assert_false,
assert_raises,
)
try:
from collections import OrderedDict
except ImportError: # handle python 2.6 and earlier
from ordereddict import OrderedDict
import talib
from talib import func
from talib import abstract
from talib.test_data import ford_2012, assert_np_arrays_equal, assert_np_arrays_not_equal
def test_pandas():
import pandas
input_df = pandas.DataFrame(ford_2012)
input_dict = dict((k, pandas.Series(v)) for k, v in ford_2012.items())
expected_k, expected_d = func.STOCH(ford_2012['high'], ford_2012['low'], ford_2012['close']) # 5, 3, 0, 3, 0
output = abstract.Function('stoch', input_df).outputs
assert_true(isinstance(output, pandas.DataFrame))
assert_np_arrays_equal(expected_k, output['slowk'])
assert_np_arrays_equal(expected_d, output['slowd'])
output = abstract.Function('stoch', input_dict).outputs
assert_true(isinstance(output, list))
assert_np_arrays_equal(expected_k, output[0])
assert_np_arrays_equal(expected_d, output[1])
expected = func.SMA(ford_2012['close'], 10)
output = abstract.Function('sma', input_df, 10).outputs
assert_true(isinstance(output, pandas.Series))
assert_np_arrays_equal(expected, output)
output = abstract.Function('sma', input_dict, 10).outputs
assert_true(isinstance(output, np.ndarray))
assert_np_arrays_equal(expected, output)
def test_pandas_series():
import pandas
input_df = pandas.DataFrame(ford_2012)
output = talib.SMA(input_df['close'], 10)
expected = pandas.Series(func.SMA(ford_2012['close'], 10),
index=input_df.index)
pandas.util.testing.assert_series_equal(output, expected)
# Test kwargs
output = talib.SMA(real=input_df['close'], timeperiod=10)
pandas.util.testing.assert_series_equal(output, expected)
# Test talib.func API
output = func.SMA(input_df['close'], timeperiod=10)
pandas.util.testing.assert_series_equal(output, expected)
# Test multiple outputs such as from BBANDS
_, output, _ = talib.BBANDS(input_df['close'], 10)
expected = pandas.Series(func.BBANDS(ford_2012['close'], 10)[1],
index=input_df.index)
pandas.util.testing.assert_series_equal(output, expected)
def test_SMA():
expected = func.SMA(ford_2012['close'], 10)
assert_np_arrays_equal(expected, abstract.Function('sma', ford_2012, 10).outputs)
assert_np_arrays_equal(expected, abstract.Function('sma')(ford_2012, 10, price='close'))
assert_np_arrays_equal(expected, abstract.Function('sma')(ford_2012, timeperiod=10))
expected = func.SMA(ford_2012['open'], 10)
assert_np_arrays_equal(expected, abstract.Function('sma', ford_2012, 10, price='open').outputs)
assert_np_arrays_equal(expected, abstract.Function('sma', price='low')(ford_2012, 10, price='open'))
assert_np_arrays_not_equal(expected, abstract.Function('sma', ford_2012, 10, price='open')(timeperiod=20))
assert_np_arrays_not_equal(expected, abstract.Function('sma', ford_2012)(10, price='close'))
assert_np_arrays_not_equal(expected, abstract.Function('sma', 10)(ford_2012, price='high'))
assert_np_arrays_not_equal(expected, abstract.Function('sma', price='low')(ford_2012, 10))
input_arrays = {'foobarbaz': ford_2012['open']}
assert_np_arrays_equal(expected, abstract.SMA(input_arrays, 10, price='foobarbaz'))
def test_STOCH():
# check defaults match
expected_k, expected_d = func.STOCH(ford_2012['high'], ford_2012['low'], ford_2012['close']) # 5, 3, 0, 3, 0
got_k, got_d = abstract.Function('stoch', ford_2012).outputs
assert_np_arrays_equal(expected_k, got_k)
assert_np_arrays_equal(expected_d, got_d)
expected_k, expected_d = func.STOCH(ford_2012['high'], ford_2012['low'], ford_2012['close'])
got_k, got_d = abstract.Function('stoch', ford_2012)(5, 3, 0, 3, 0)
assert_np_arrays_equal(expected_k, got_k)
assert_np_arrays_equal(expected_d, got_d)
expected_k, expected_d = func.STOCH(ford_2012['high'], ford_2012['low'], ford_2012['close'], 15)
got_k, got_d = abstract.Function('stoch', ford_2012)(15, 5, 0, 5, 0)
assert_np_arrays_not_equal(expected_k, got_k)
assert_np_arrays_not_equal(expected_d, got_d)
expected_k, expected_d = func.STOCH(ford_2012['high'], ford_2012['low'], ford_2012['close'], 15, 5, 1, 5, 1)
got_k, got_d = abstract.Function('stoch', ford_2012)(15, 5, 1, 5, 1)
assert_np_arrays_equal(expected_k, got_k)
assert_np_arrays_equal(expected_d, got_d)
def test_doji_candle():
expected = func.CDLDOJI(ford_2012['open'], ford_2012['high'], ford_2012['low'], ford_2012['close'])
got = abstract.Function('CDLDOJI').run(ford_2012)
assert_np_arrays_equal(got, expected)
def test_MAVP():
mavp = abstract.MAVP
assert_raises(Exception, mavp.set_input_arrays, ford_2012)
input_d = {}
input_d['close'] = ford_2012['close']
input_d['periods'] = np.arange(30)
assert_true(mavp.set_input_arrays(input_d))
assert_equals(mavp.input_arrays, input_d)
def test_info():
stochrsi = abstract.Function('STOCHRSI')
stochrsi.input_names = {'price': 'open'}
stochrsi.parameters = {'fastd_matype': talib.MA_Type.EMA}
expected = {
'display_name': 'Stochastic Relative Strength Index',
'function_flags': ['Function has an unstable period'],
'group': 'Momentum Indicators',
'input_names': OrderedDict([('price', 'open')]),
'name': 'STOCHRSI',
'output_flags': OrderedDict([
('fastk', ['Line']),
('fastd', ['Line']),
]),
'output_names': ['fastk', 'fastd'],
'parameters': OrderedDict([
('timeperiod', 14),
('fastk_period', 5),
('fastd_period', 3),
('fastd_matype', 1),
]),
}
assert_equals(expected, stochrsi.info)
expected = {
'display_name': 'Bollinger Bands',
'function_flags': ['Output scale same as input'],
'group': 'Overlap Studies',
'input_names': OrderedDict([('price', 'close')]),
'name': 'BBANDS',
'output_flags': OrderedDict([
('upperband', ['Values represent an upper limit']),
('middleband', ['Line']),
('lowerband', ['Values represent a lower limit']),
]),
'output_names': ['upperband', 'middleband', 'lowerband'],
'parameters': OrderedDict([
('timeperiod', 5),
('nbdevup', 2),
('nbdevdn', 2),
('matype', 0),
]),
}
assert_equals(expected, abstract.Function('BBANDS').info)
def test_input_names():
expected = OrderedDict([('price', 'close')])
assert_equals(expected, abstract.Function('MAMA').input_names)
# test setting input_names
obv = abstract.Function('OBV')
expected = OrderedDict([
('price', 'open'),
('prices', ['volume']),
])
obv.input_names = expected
assert_equals(obv.input_names, expected)
obv.input_names = {
'price': 'open',
'prices': ['volume'],
}
assert_equals(obv.input_names, expected)
def test_input_arrays():
mama = abstract.Function('MAMA')
# test default setting
expected = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None,
}
assert_equals(expected, mama.get_input_arrays())
# test setting/getting input_arrays
assert_true(mama.set_input_arrays(ford_2012))
assert_equals(mama.get_input_arrays(), ford_2012)
assert_raises(Exception,
mama.set_input_arrays, {'hello': 'fail', 'world': 'bye'})
# test only required keys are needed
willr = abstract.Function('WILLR')
reqd = willr.input_names['prices']
input_d = dict([(key, ford_2012[key]) for key in reqd])
assert_true(willr.set_input_arrays(input_d))
assert_equals(willr.input_arrays, input_d)
# test extraneous keys are ignored
input_d['extra_stuffs'] = 'you should never see me'
input_d['date'] = np.random.rand(100)
assert_true(willr.set_input_arrays(input_d))
# test missing keys get detected
input_d['open'] = ford_2012['open']
input_d.pop('close')
assert_raises(Exception, willr.set_input_arrays, input_d)
# test changing input_names on the Function
willr.input_names = {'prices': ['high', 'low', 'open']}
assert_true(willr.set_input_arrays(input_d))
def test_parameters():
stoch = abstract.Function('STOCH')
expected = OrderedDict([
('fastk_period', 5),
('slowk_period', 3),
('slowk_matype', 0),
('slowd_period', 3),
('slowd_matype', 0),
])
assert_equals(expected, stoch.parameters)
stoch.parameters = {'fastk_period': 10}
expected['fastk_period'] = 10
assert_equals(expected, stoch.parameters)
stoch.parameters = {'slowk_period': 8, 'slowd_period': 5}
expected['slowk_period'] = 8
expected['slowd_period'] = 5
assert_equals(expected, stoch.parameters)
stoch.parameters = {'slowd_matype': talib.MA_Type.T3}
expected['slowd_matype'] = 8
assert_equals(expected, stoch.parameters)
stoch.parameters = {
'slowk_matype': talib.MA_Type.WMA,
'slowd_matype': talib.MA_Type.EMA,
}
expected['slowk_matype'] = 2
expected['slowd_matype'] = 1
assert_equals(expected, stoch.parameters)
def test_lookback():
assert_equals(abstract.Function('SMA', 10).lookback, 9)
stochrsi = abstract.Function('stochrsi', 20, 5, 3)
assert_equals(stochrsi.lookback, 26)
| mit |
altairpearl/scikit-learn | sklearn/gaussian_process/tests/test_gpc.py | 11 | 6079 | """Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
""" Check binary predict decision has also predicted probability above 0.5.
"""
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
""" Test that GPC can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
""" Test GPC for multi-class classification problems. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
""" Test that multi-class GPC produces identical results with n_jobs>1. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
| bsd-3-clause |
h-mayorquin/coursera | neural_data/04_exercise/problem_set4.py | 1 | 10735 | #
# NAME
# problem_set4.py
#
# DESCRIPTION
# In Problem Set 4, you will classify EEG data into NREM sleep stages and
# create spectrograms and hypnograms.
#
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import matplotlib.mlab as m
from sklearn.linear_model import LinearRegression, LogisticRegression
def load_examples(filename):
"""
load_examples takes the file name and reads in the data. It returns an
array containing the 4 examples of the 4 stages in its rows (row 0 = REM;
1 = stage 1 NREM; 2 = stage 2; 3 = stage 3 and 4) and the sampling rate for
the data in Hz (samples per second).
"""
data = np.load(filename)
return data['examples'], int(data['srate'])
def load_eeg(filename):
"""
load_eeg takes the file name and reads in the data. It returns an
array containing EEG data and the sampling rate for
the data in Hz (samples per second).
"""
data = np.load(filename)
return data['eeg'], int(data['srate'])
def load_stages(filename):
"""
load_stages takes the file name and reads in the stages data. It returns an
array containing the correct stages (one for each 30s epoch)
"""
data = np.load(filename)
return data['stages']
def plot_example_psds(example, rate):
"""
This function creates a figure with 4 lines to show the overall psd for
the four sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
2 and 3/4)
"""
NFFT = 512
rem = example[0]
nrem_stage1 = example[1]
nrem_stage2 = example[2]
nrem_stage34 = example[3]
plt.figure()
##YOUR CODE HERE
Pxx, freqs = m.psd(rem, NFFT=NFFT, Fs=rate)
Pxx = Pxx / np.sum(Pxx) # normalize
plt.plot(freqs, Pxx, label='rem')
plt.hold(True)
Pxx, freqs = m.psd(nrem_stage1, NFFT=NFFT, Fs=rate)
Pxx = Pxx / np.sum(Pxx) # normalize
plt.plot(freqs, Pxx, label='nrem stage 1')
Pxx, freqs = m.psd(nrem_stage2, NFFT=NFFT, Fs=rate)
Pxx = Pxx / np.sum(Pxx) # normalize
plt.plot(freqs, Pxx, label='nrem stage 2')
Pxx, freqs = m.psd(nrem_stage34, NFFT=NFFT, Fs=rate)
Pxx = Pxx / np.sum(Pxx) # normalize
plt.plot(freqs, Pxx, label='nrem stage 3/4')
# Plot details
plt.xlim([0, 20])
plt.legend()
plt.show()
return
def plot_example_spectrograms(example,rate):
"""
    This function creates a figure with spectrogram subplots of the four
sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
2 and 3/4)
"""
NFFT = 2**8
rem = example[0]
nrem_stage1 = example[1]
nrem_stage2 = example[2]
nrem_stage34 = example[3]
plt.figure()
plt.hold(True)
###YOUR CODE HERE
plt.subplot(2, 2, 1)
Pxx, freqs, bins, im = plt.specgram(rem, NFFT=NFFT, Fs=rate)
plt.title('Rem Sleep')
plt.ylim((0,30))
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.subplot(2, 2, 2)
Pxx, freqs, bins, im = plt.specgram(nrem_stage1, NFFT=NFFT, Fs=rate)
plt.title('Non-rem stage 1')
plt.ylim((0,30))
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.subplot(2, 2, 3)
Pxx, freqs, bins, im = plt.specgram(nrem_stage2, NFFT=NFFT, Fs=rate)
plt.title('Non-rem stage 2')
plt.ylim((0,30))
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.subplot(2, 2, 4)
Pxx, freqs, bins, im = plt.specgram(nrem_stage34, NFFT=NFFT, Fs=rate)
plt.title('Non-rem stage 3/4')
plt.ylim((0,30))
plt.xlabel('Time')
plt.ylabel('Frequency')
plt.show()
# return Pxx, freqs
def predictor():
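    """
    Train a logistic-regression classifier on spectrogram features of the
    three NREM example recordings and return the fitted model.
    """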
example_filename = 'example_stages.npz'
examples, srate = load_examples(example_filename)
NFFT = 2**8
rate = srate
number_of_features = 45
c = 1e5
nrem1 = examples[1]
nrem2 = examples[2]
nrem3 = examples[3]
Pxx1, freqs, bins, im = plt.specgram(nrem1, NFFT=NFFT, Fs=rate)
Pxx2, freqs, bins, im = plt.specgram(nrem2, NFFT=NFFT, Fs=rate)
Pxx3, freqs, bins, im = plt.specgram(nrem3, NFFT=NFFT, Fs=rate)
plt.show()
Normalize = False
# Normalize
if Normalize:
sum = np.sum(Pxx1, axis=1)
Pxx1 = Pxx1 / sum[:,np.newaxis]
sum = np.sum(Pxx2, axis=1)
Pxx2 = Pxx2 / sum[:,np.newaxis]
sum = np.sum(Pxx3, axis=1)
Pxx3 = Pxx3 / sum[:,np.newaxis]
x1 = np.arange(0,8)
x2 = np.arange(10,15)
x4 = np.concatenate((x1,x2))
x4 = np.arange(0,15)
if True:
Pxx1 = Pxx1[0:number_of_features, :]
Pxx2 = Pxx2[0:number_of_features, :]
Pxx3 = Pxx3[0:number_of_features, :]
if False:
Pxx1 = Pxx1[x4, :]
Pxx2 = Pxx2[x4, :]
Pxx3 = Pxx3[x4, :]
value1 = 1
value2 = 2
value3 = 3
y1 = np.ones(Pxx1.shape[1]) * value1
y2 = np.ones(Pxx2.shape[1]) * value2
y3 = np.ones(Pxx3.shape[1]) * value3
x = np.hstack([Pxx1, Pxx2, Pxx3])
x = x.transpose()
y = np.hstack([y1, y2, y3])
lr = LogisticRegression(C=c, penalty='l1')
lr.fit(x, y)
return lr
def classify_epoch(epoch , rate, lr):
"""
This function returns a sleep stage classification (integers: 1 for NREM
stage 1, 2 for NREM stage 2, and 3 for NREM stage 3/4) given an epoch of
EEG and a sampling rate.
"""
NFFT = 2**8
Pxx, freqs = m.psd(epoch, NFFT=NFFT, Fs=rate)
p = map(lr.predict, Pxx[np.newaxis,0:45])
p = np.array(p)
p = p[:,0]
stage = int(p)
###YOUR CODE HERE
return stage
def plot_hypnogram(eeg, stages, srate, title):
"""
This function takes the eeg, the stages and sampling rate and draws a
hypnogram over the spectrogram of the data.
"""
NFFT = 2**8
fig,ax1 = plt.subplots() #Needed for the multiple y-axes
#Use the specgram function to draw the spectrogram as usual
Pxx, freqs, bins, im = plt.specgram(eeg, NFFT=NFFT, Fs=srate)
#Label your x and y axes and set the y limits for the spectrogram
plt.ylim((0,30))
plt.xlim((0,3600))
plt.xlabel('Time (seconds)')
plt.ylabel('Frequency (Hz)')
ax2 = ax1.twinx() #Necessary for multiple y-axes
#Use ax2.plot to draw the hypnogram. Be sure your x values are in seconds
#HINT: Use drawstyle='steps' to allow step functions in your plot
t = np.arange(0, stages.size) * 30
ax2.plot(t, stages, drawstyle='steps')
#Label your right y-axis and change the text color to match your plot
ax2.set_ylabel('NREM Sleep Stage',color='b')
plt.xlim((0,3600))
#Set the limits for the y-axis
plt.ylim((0.5,3.5))
#Only display the possible values for the stages
ax2.set_yticks(np.arange(1,4))
    #Change the right axis tick color to match your plot
for t1 in ax2.get_yticklabels():
t1.set_color('b')
#Title your plot
plt.title(title)
def classifier_tester(classifiedEEG, actualEEG):
"""
returns percent of 30s epochs correctly classified
"""
epochs = len(classifiedEEG)
incorrect = np.nonzero(classifiedEEG-actualEEG)[0]
percorrect = (epochs - len(incorrect))/epochs*100
print 'EEG Classifier Performance: '
print ' Correct Epochs = ' + str(epochs-len(incorrect))
print ' Incorrect Epochs = ' + str(len(incorrect))
print ' Percent Correct= ' + str(percorrect)
print
return percorrect
def test_examples(examples, srate, lr):
"""
This is one example of how you might write the code to test the provided
examples.
"""
i = 0
bin_size = int(30*srate)
c = np.zeros((4,int(len(examples[1,:])/bin_size)))
while i + bin_size < len(examples[1,:]):
for j in range(1,4):
c[j,int(i/bin_size)] = classify_epoch(examples[j, range(i,i+bin_size)],srate, lr)
i = i + bin_size
totalcorrect = 0
num_examples = 0
for j in range(1,4):
canswers = np.ones(len(c[j,:]))*j
correct = classifier_tester(c[j,:],canswers)
totalcorrect = totalcorrect + correct
num_examples = num_examples + 1
average_percent_correct = totalcorrect/num_examples
print 'Average Percent Correct= ' + str(average_percent_correct)
return average_percent_correct
def classify_eeg(eeg,srate, lr):
"""
DO NOT MODIFY THIS FUNCTION
classify_eeg takes an array of eeg amplitude values and a sampling rate and
breaks it into 30s epochs for classification with the classify_epoch function.
It returns an array of the classified stages.
"""
bin_size_sec = 30
bin_size_samp = int(bin_size_sec*srate)
t = 0
classified = np.zeros(int(len(eeg)/bin_size_samp))
while t + bin_size_samp < len(eeg):
classified[int(t/bin_size_samp)] = classify_epoch(eeg[range(t,t+bin_size_samp)],srate, lr)
t = t + bin_size_samp
return classified
##########################
#You can put the code that calls the above functions down here
if __name__ == "__main__":
#YOUR CODE HERE
plt.close('all') #Closes old plots
##PART 1
#Load the example data
example_filename = 'example_stages.npz'
examples, srate = load_examples(example_filename)
#Plot the psds
#plot_example_psds(examples, srate)
#Plot the spectrograms
#plot_example_spectrograms(examples, srate)
# predictor
lr = predictor()
#Test the examples
#test_examples(examples, srate, lr)
#Load the practice data
practice_eeg, srate = load_eeg('practice_eeg.npz')
    #Load the practice answers
practice_stages = load_stages('practice_answers.npz')
#Classify the practice data
#classified = classify_eeg(practice_eeg ,srate, lr)
#Check your performance
#classifier_tester(classified, practice_stages)
#Generate the hypnogram plots
eeg, srate = load_eeg('test_eeg.npz')
stages = classify_eeg(eeg, srate, lr)
    #plot hypnogram
#plot_hypnogram(eeg, stages, srate, 'Hypnogram - Test Data Set')
    plot_hypnogram(practice_eeg, practice_stages, srate, 'Hypnogram - Practice Data Set')
plt.show()
if False:
plot_hypnogram(practice_eeg, classified, srate, 'mine')
plt.show()
plot_hypnogram(practice_eeg, practice_stages, srate, 'correct')
plt.show()
| bsd-2-clause |
RBDA-F17/crime | code_drop_1/clean_taxi_columns.py | 1 | 12643 | import os
import sys
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark.sql import Row, Column
from pyspark.sql.functions import *
from pyspark.sql.functions import udf
from datetime import datetime
# from osgeo import *
# from osgeo import ogr
import numpy as np
import pandas as pd
sc= SparkContext()
sqlContext = SQLContext(sc)
# This part is not working in spark yet. We might have to do it after we have the DF.
#read your shapefile
#DOES NOT WORK
# drv = ogr.GetDriverByName('ESRI Shapefile')
# ds_in = drv.Open("../data/taxi_zones/taxi_zones_clean.shp")
# lyr_in = ds_in.GetLayer(0)
# geo_ref = lyr_in.GetSpatialRef()
# idx_reg = lyr_in.GetLayerDefn().GetFieldIndex("LocationID")
# def check_zone(lon=-73.991957, lat=40.721567):
# """Checks the taxi zone for a lon,lat"""
# drv = ogr.GetDriverByName('ESRI Shapefile')
# ds_in = drv.Open("../data/taxi_zones/taxi_zones_clean.shp")
# lyr_in = ds_in.GetLayer(0)
# geo_ref = lyr_in.GetSpatialRef()
# idx_reg = lyr_in.GetLayerDefn().GetFieldIndex("LocationID")
# pt = ogr.Geometry(ogr.wkbPoint)
# pt.SetPoint_2D(0, lon, lat)
# lyr_in.SetSpatialFilter(pt)
# for feat_in in lyr_in:
# print feat_in.GetFieldAsString(idx_reg)
# # if loc:
# # return loc
# # pickup_longitude=-73.991957
# # pickup_latitude=40.721567
# #
# #
# # print "%f,%f" % (point.GetX(), point.GetY())
# taxi_shp='../data/taxi_zones/taxi_zones_clean.shp'
# drv = ogr.GetDriverByName('ESRI Shapefile')
# ds_in = drv.Open(taxi_shp)
# lyr_in = ds_in.GetLayer(0)
# geo_ref = lyr_in.GetSpatialRef()
# idx_reg = lyr_in.GetLayerDefn().GetFieldIndex("LocationID")
# # dstlayer = dstshp.CreateLayer('0',geom_type=ogr.wkbPolygon)
# # geojson = """{"type":"Point","coordinates":[-73.991957,40.721567]}"""
# # point = ogr.CreateGeometryFromJson(geojson)
# # pt.SetPoint_2D(0, lon, lat)
# # lyr_in.SetSpatialFilter(pt)
# pt = ogr.Geometry(ogr.wkbPoint)
# pt.SetPoint_2D(0, -73.991957,40.721567)
# # lyr_in.SetSpatialFilter(pt)
# lyr_in.Intersection(pt)
# # for feat_in in lyr_in:
# # print feat_in.Intersection(pt)
# layer = ogr.Geometry(3)
# intersection = layer.Intersection(pt)
# get_zone_id(pickup_longitude,pickup_longitude)
# check_zone_udf = udf( lambda x,y: check_zone(x,y) ,StringType())
# zones = pd.read_csv('../data/taxi_zones/taxi_zones_clean.csv',sep=' ',
# header=None, names = ['lon','lat','LocationID'])
schema = StructType([
StructField("vendor_name",StringType(), True),
StructField("pickup_datetime",TimestampType(), True),
StructField("dropoff_datetime",TimestampType(), True),
StructField("passenger_count",IntegerType(), True),
StructField("trip_distance",DoubleType(), True),
StructField("pickup_longitude",DoubleType(), True),
StructField("pickup_latitude",DoubleType(), True),
StructField("rate_code",StringType(), True),
StructField("store_and_fwd_flag",StringType(), True),
StructField("dropoff_longitude",DoubleType(), True),
StructField("dropoff_latitude",DoubleType(), True),
StructField("payment_type",StringType(), True),
StructField("fare_amount",DoubleType(), True),
StructField("extra",DoubleType(), True),
StructField("mta_tax",DoubleType(), True),
StructField("tip_amount",DoubleType(), True),
StructField("tolls_amount",DoubleType(), True),
StructField("improvement_surcharge",DoubleType(), True),
StructField("total_amount",DoubleType(), True),
StructField("pickup_location_id",StringType(), True),
StructField("dropoff_location_id",StringType(), True),
])
user = os.environ['USER']
if user not in ['cpa253','vaa238','vm1370']:
user = 'cpa253'
def clean_vendor_name(x):
if x in ['VTS','CMT','DDS']:
return x
vendor_dict= {'1':'CMT','2':'VTS',"3":'DDS'}
return vendor_dict[x]
def to_date(x):
return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
def clean_passenger_count(x):
out = int(x)
if out > 9:
return None
else:
return out
def clean_rate_code(x):
try:
if int(x) not in xrange(1,7):
return None
else:
rate_code_dict ={
"1":"Standard rate",
"2":"JFK",
"3":"Newark",
"4":"Nassau or Westchester",
"5":"Negotiated fare",
"6":"Group ride",
}
return rate_code_dict[x]
except ValueError:
return None
def clean_store_flag(x):
flag_dict= {
' ': None,
'':None,
'*':None,
'2':None,
"1":"Stored",
"Y":"Stored",
"0":"Not a stored",
"N":"Not a stored",
}
return flag_dict[x]
def clean_payment_type(x):
    # ONLY AFTER 2009
pay_dict={
"1":"Credit card",
"2":"Cash",
"3":"No charge",
"NO CHARGE":"No charge",
"4":"Dispute",
"5":"Unknown",
"6":"Voided trip",
'Cas':"Cash",
'CRE':"Credit card",
'CREDIT':"Credit card",
'CRD':"Credit card",
'CAS':"Cash",
'CASH':"Cash",
'CSH':"Cash",
'Dis':"Dispute",
'DIS':"Dispute",
"DISPUTE":"Dispute",
'Cre':"Credit card",
'No ':"No charge",
'NO ':"No charge",
'NOC':"No charge",
'NA ':"Unknown",
'UNK':"Unknown",
}
return pay_dict[x.upper()]
def to_double(x):
try:
return float(x)
except ValueError:
return None
def clean_imp_sur(x):
    # improvement_surcharge only exists from 2015 onwards; `y` is the year
    # of the file currently being processed (module-level loop variable)
    if y < 2015:
        return 0.0
    else:
        return float(x)
zones_file = '/user/%s/rbda/crime/data/taxi_zones/taxi_zones_clean.csv' % (user)
zones_rdd = sc.textFile(zones_file).\
map(lambda x: x.split(' ')).\
map(lambda (lon,lat,LocationID): (1,lon,lat,LocationID))
zones = pd.read_csv('../data/taxi_zones/taxi_zones_clean.csv', sep =' ', names=['lon','lat','LocationID'])
def get_zone_id(lon, lat, pd_data):
    """Return the LocationID of the taxi zone whose centroid is closest to (lon, lat)."""
    if not lon:
        return None
    if not lat:
        return None
pd_data['dist'] = (pd_data.lon - lon)**2 + (pd_data.lat - lat)**2
ind=pd_data.idxmin(axis=0)['dist']
out = pd_data.LocationID[ind]
return str(out)
# get_zone_broad=sc.broadcast(get_zone_id)
zones_broad=sc.broadcast(zones)
# def get_zone_id(lon,lat):
# if not lon:
# return None
# if not lon:
# return None
# dist = zones.map(lambda (lo, la, LocationID):
# ( ( float(lo) - lon )**2 + (float(la) - lat)**2 , LocationID ) ).min(lambda x: x[0])
# # for row in zones.itertuples():
# # point_y = np.array( (row.lon,row.lat) )
# # dist = np.linalg.norm(point - point_y)
# # if( dist < min_dist):
# # min_dist = dist
# # min_loc = row.LocationID
# return dist[1]
# pickup_longitude=-73.991957
# pickup_latitude=40.721567
# get_zone_id(pickup_longitude,pickup_latitude)
# get_zone_id(-73.901408 ,40.906096)
# time( get_zone_id(pickup_longitude,pickup_latitude) )
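# --- Illustrative sketch (not part of the cleaning pipeline) ---
# get_zone_id snaps a (lon, lat) pair to the taxi zone whose centroid is
# nearest by squared Euclidean distance.  The coordinates below are the
# same sample pickup point used in the commented examples above.
def _zone_lookup_demo():
    sample_lon, sample_lat = -73.991957, 40.721567
    return get_zone_id(sample_lon, sample_lat, zones_broad.value)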
# This is the schema
def to_row(l):
"""This function filters the rows form the csv that have an incorrect number of columns and returns a Row for spark DataFrame """
r = Row(
"vendor_name",
"pickup_datetime",
"dropoff_datetime",
"passenger_count",
"trip_distance",
"pickup_longitude",
"pickup_latitude",
"rate_code",
"store_and_fwd_flag",
"dropoff_longitude",
"dropoff_latitude",
"payment_type",
"fare_amount",
"extra",
"mta_tax",
"tip_amount",
"tolls_amount",
"improvement_surcharge",
"total_amount",
"pickup_location_id",
"dropoff_location_id",
)
if y <= 2014:
out = r(
clean_vendor_name(l[0]), #vendor_name
to_date(l[1]), #pickup_datetime
to_date(l[2]), #dropoff_datetime
clean_passenger_count(l[3]), #passenger_count
to_double(l[4]), #trip_distance
to_double(l[5]), #pickup_longitude
to_double(l[6]), #pickup_latitude
clean_rate_code(l[7]), #rate_code
clean_store_flag(l[8]), #store_and_fwd_flag
to_double(l[9]), #dropoff_longitude
to_double(l[10]), #dropoff_latitude
clean_payment_type(l[11]), #payment_type
to_double(l[12]), #fare_amount
to_double(l[13]), #extra
to_double(l[14]), #mta_tax
to_double(l[15]), #tip_amount
to_double(l[16]), #tolls_amount
0.0, #improvement_surcharge
to_double(l[17]), #total_amount
# None,
# None,
get_zone_id(to_double(l[5]),to_double(l[6]), zones_broad.value ),
get_zone_id(to_double(l[9]),to_double(l[10]),zones_broad.value),
)
return out
if y == 2015:
out = r(
clean_vendor_name(l[0]), #vendor_name
to_date(l[1]), #pickup_datetime
to_date(l[2]), #dropoff_datetime
clean_passenger_count(l[3]), #passenger_count
to_double(l[4]), #trip_distance
to_double(l[5]), #pickup_longitude
to_double(l[6]), #pickup_latitude
clean_rate_code(l[7]), #rate_code
clean_store_flag(l[8]), #store_and_fwd_flag
to_double(l[9]), #dropoff_longitude
to_double(l[10]), #dropoff_latitude
clean_payment_type(l[11]), #payment_type
to_double(l[12]), #fare_amount
to_double(l[13]), #extra
to_double(l[14]), #mta_tax
to_double(l[15]), #tip_amount
to_double(l[16]), #tolls_amount
            clean_imp_sur(l[17]), #improvement_surcharge
to_double(l[18]), #total_amount
get_zone_id(to_double(l[5]),to_double(l[6]), zones_broad.value ),
get_zone_id(to_double(l[9]),to_double(l[10]),zones_broad.value),
)
return out
if y == 2016 and m <= 6:
out = r(
clean_vendor_name(l[0]), #vendor_name
to_date(l[1]), #pickup_datetime
to_date(l[2]), #dropoff_datetime
clean_passenger_count(l[3]), #passenger_count
to_double(l[4]), #trip_distance
to_double(l[5]), #pickup_longitude
to_double(l[6]), #pickup_latitude
clean_rate_code(l[7]), #rate_code
clean_store_flag(l[8]), #store_and_fwd_flag
to_double(l[9]), #dropoff_longitude
to_double(l[10]), #dropoff_latitude
clean_payment_type(l[11]), #payment_type
to_double(l[12]), #fare_amount
to_double(l[13]), #extra
to_double(l[14]), #mta_tax
to_double(l[15]), #tip_amount
to_double(l[16]), #tolls_amount
            clean_imp_sur(l[17]), #improvement_surcharge
to_double(l[18]), #total_amount
get_zone_id(to_double(l[5]),to_double(l[6]), zones_broad.value ),
get_zone_id(to_double(l[9]),to_double(l[10]),zones_broad.value),
)
return out
else:
out = r(
clean_vendor_name(l[0]), #vendor_name
to_date(l[1]), #pickup_datetime
to_date(l[2]), #dropoff_datetime
clean_passenger_count(l[3]), #passenger_count
to_double(l[4]), #trip_distance
None,# to_double(l[5]), #pickup_longitude
None,# to_double(l[6]), #pickup_latitude
clean_rate_code(l[5]), #rate_code
clean_store_flag(l[6]), #store_and_fwd_flag
None,# to_double(l[9]), #dropoff_longitude
None,# to_double(l[10]), #dropoff_latitude
clean_payment_type(l[9]), #payment_type
to_double(l[10]), #fare_amount
to_double(l[11]), #extra
to_double(l[12]), #mta_tax
to_double(l[13]), #tip_amount
to_double(l[14]), #tolls_amount
            clean_imp_sur(l[15]), #improvement_surcharge
to_double(l[16]), #total_amount
l[7], #PICK ID
l[8], #DROP ID
)
return out
def filter_rows(l):
if y < 2015 and len(l)==18:
return True
if y == 2015 and len(l)==19:
return True
if y == 2016 and m <= 6 and len(l)== 19:
return True
if y == 2016 and m > 6 and len(l)== 17:
return True
if y == 2017 and len(l)== 17:
return True
return False
# udf_get_zone_id = udf(lambda x, y: get_zone_id(x,y) , StringType())
## Clean 2009 - 2016
for y in xrange(2009,2017):
for m in xrange(1,13):
# Get the file
file_name = '/user/%s/rbda/crime/data/taxi_data/yellow/yellow_tripdata_%d-%02d.csv' % (user,y,m)
print "Cleaning file:\n\t%s" % file_name
# Parse the csv by ',' and get a Row object
dat = sc.textFile(file_name).\
map(lambda l: l.split(",")).\
filter(filter_rows).\
map( lambda l: to_row(l) )
# Create a DF with the schema.
df = sqlContext.createDataFrame(dat,schema)
# Output to save as partition table
output_folder = '/user/%s/rbda/crime/data/taxi_data_clean/yellow/year=%d/month=%02d' %(user,y,m)
print 'Saving to hdfs://%s' % output_folder
df.write.mode('ignore').save(output_folder)
## Clean current year's data
for y in xrange(2017,2018):
for m in xrange(1,7):
# Get the file
file_name = '/user/%s/rbda/crime/data/taxi_data/yellow/yellow_tripdata_%d-%02d.csv' % (user,y,m)
print "Cleaning file:\n\t%s" % file_name
# Parse the csv by ',' and get a Row object
dat = sc.textFile(file_name).\
map(lambda l: l.split(",")).\
filter(filter_rows).\
map( lambda l: to_row(l) )
# Create a DF with the schema.
df = sqlContext.createDataFrame(dat,schema)
# Output to save as partition table
output_folder = '/user/%s/rbda/crime/data/taxi_data_clean/yellow/year=%d/month=%02d' %(user,y,m)
print 'Saving to hdfs://%s' % output_folder
df.write.mode('ignore').save(output_folder)
| gpl-3.0 |
sinhrks/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, DD, dataset)  # note: DD here is the last value from the D loop above
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
litaotao/mpld3 | mpld3/tests/test_figure.py | 21 | 1552 | """
Test creation of a figure
"""
import matplotlib.pyplot as plt
from .. import fig_to_dict
from numpy.testing import assert_equal
def test_basic_figure():
size = (8, 6)
dpi = 80
fig = plt.figure(figsize=size, dpi=dpi)
rep = fig_to_dict(fig)
plt.close(fig)
assert_equal(list(sorted(rep.keys())),
['axes', 'data', 'height', 'id', 'plugins', 'width'])
assert_equal(rep['width'], size[0] * dpi)
assert_equal(rep['height'], size[1] * dpi)
assert_equal(rep['data'], {})
assert_equal(rep['axes'], [])
def test_axes():
bbox = [0.1, 0.1, 0.8, 0.8]
xlim = [-10, 10]
ylim = [-20, 20]
fig = plt.figure()
ax = fig.add_axes(bbox)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
rep = fig_to_dict(fig)
axrep = rep['axes'][0]
assert_equal(list(sorted(axrep.keys())),
['axes', 'axesbg', 'axesbgalpha', 'bbox', 'collections',
'id', 'images', 'lines', 'markers', 'paths', 'sharex',
'sharey', 'texts', 'xdomain', 'xlim', 'xscale', 'ydomain',
'ylim', 'yscale', 'zoomable'])
for key in ['collections', 'images', 'lines', 'markers', 'paths', 'texts']:
assert_equal(axrep[key], [])
for key in ['xlim', 'xdomain']:
assert_equal(axrep[key], xlim)
for key in ['ylim', 'ydomain']:
assert_equal(axrep[key], ylim)
for key in ['xscale', 'yscale']:
assert_equal(axrep[key], 'linear')
assert_equal(axrep['zoomable'], True)
assert_equal(axrep['bbox'], bbox)
| bsd-3-clause |
vrieni/orange | Orange/testing/regression/tests_20/modules_mds2.py | 6 | 1102 | # Description: Uses MDS on iris data set and plots the scatterplot to illustrate the effect and observe the degree of separation between groups of different classes
# Category: association
# Classes: orngMDS.MDS
# Uses: iris.tab
# Referenced: orngMDS.htm
import orange
import orngMDS
data=orange.ExampleTable("../datasets/iris.tab")
euclidean = orange.ExamplesDistanceConstructor_Euclidean(data)
distance = orange.SymMatrix(len(data))
for i in range(len(data)-1):
for j in range(i+1, len(data)):
distance[i, j] = euclidean(data[i], data[j])
mds=orngMDS.MDS(distance)
mds.run(100)
try:
from pylab import *
colors = ["red", "yellow", "blue"]
points = []
for (i,d) in enumerate(data):
points.append((mds.points[i][0], mds.points[i][1], d.getclass()))
for c in range(len(data.domain.classVar.values)):
sel = filter(lambda x: x[-1]==c, points)
x = [s[0] for s in sel]
y = [s[1] for s in sel]
scatter(x, y, c=colors[c])
savefig('mds-iris.png', dpi=72)
show()
except ImportError:
print "Can't import pylab (matplotlib)"
| gpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/scipy/integrate/quadrature.py | 20 | 28269 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
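# --- Illustrative usage sketch (not part of the public SciPy API) ---
# An n-point Gauss-Legendre rule is exact for polynomials up to degree
# 2*n - 1, so the default 5-point rule should reproduce the integral of
# x**8 over [0, 1] (exactly 1/9) to machine precision.
def _fixed_quad_demo():
    val, _ = fixed_quad(lambda x: x**8, 0.0, 1.0, n=5)
    return val  # ~0.111111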
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
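# --- Illustrative usage sketch (not part of the public SciPy API) ---
# `quadrature` raises the Gauss order until successive estimates agree to
# within the tolerances; integrating exp(x) over [-1, 0] should return a
# value near 1 - exp(-1) together with the final error estimate.
def _quadrature_demo():
    val, err = quadrature(np.exp, -1.0, 0.0)
    return val, err  # val ~ 0.63212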
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
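# --- Illustrative usage sketch (not part of the public SciPy API) ---
# With equally spaced samples and an even number of intervals, Simpson's
# rule is exact for cubics, so integrating x**3 over [0, 2] from 9 samples
# should give 2**4 / 4 = 4 up to rounding.
def _simps_demo():
    x = np.linspace(0, 2, 9)  # 9 samples -> 8 intervals
    return simps(x**3, x)     # ~4.0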
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
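# --- Illustrative usage sketch (not part of the public SciPy API) ---
# `romb` requires 2**k + 1 equally spaced samples; 17 samples of exp(x)
# on [0, 1] give a highly accurate estimate of e - 1.
def _romb_demo():
    x = np.linspace(0, 1, 17)  # 2**4 + 1 samples
    return romb(np.exp(x), dx=x[1] - x[0])  # ~1.71828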
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
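# Added illustration (not part of the original source): for N = 2 the table
# above reproduces Simpson's rule.  Unpacking that row gives the weights
# [1/3, 4/3, 1/3] and error coefficient -1/90, so three equally spaced
# samples integrate a parabola exactly, e.g. f(x) = x**2 on [0, 2]:
#
#     >>> na, da, vi, nb, db = _builtincoeffs[2]
#     >>> a = na * np.array(vi, dtype=float) / da      # [1/3, 4/3, 1/3]
#     >>> B = float(nb) / db                           # -1/90
#     >>> dx = 1.0                                     # samples at x = 0, 1, 2
#     >>> dx * np.sum(a * np.array([0., 1., 4.]))      # ~ 8/3, the exact integral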
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]`
and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
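# Added usage sketch (not part of the original source): apply an order-10
# Newton-Cotes rule to integrate sin(x) over [0, pi].  The returned weights
# are combined with the sampled values and scaled by the sample spacing,
# exactly as in the docstring formula above.
#
#     >>> N = 10
#     >>> x = np.linspace(0, np.pi, N + 1)
#     >>> an, B = newton_cotes(N, 1)
#     >>> dx = np.pi / N
#     >>> quad = dx * np.sum(an * np.sin(x))   # very close to the exact value 2.0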
| mit |
laszlocsomor/tensorflow | tensorflow/examples/learn/boston.py | 75 | 2549 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = datasets.load_boston()
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = model_selection.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build a 2-layer fully connected DNN with 10 and 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column('x', shape=np.array(x_train).shape[1:])]
regressor = tf.estimator.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': x_train}, y=y_train, batch_size=1, num_epochs=None, shuffle=True)
regressor.train(input_fn=train_input_fn, steps=2000)
# Predict.
x_transformed = scaler.transform(x_test)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': x_transformed}, y=y_test, num_epochs=1, shuffle=False)
predictions = regressor.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['predictions'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score_sklearn = metrics.mean_squared_error(y_predicted, y_test)
print('MSE (sklearn): {0:f}'.format(score_sklearn))
# Score with tensorflow.
scores = regressor.evaluate(input_fn=test_input_fn)
print('MSE (tensorflow): {0:f}'.format(scores['average_loss']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
huzq/scikit-learn | sklearn/cluster/_affinity_propagation.py | 2 | 17232 | """Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array, check_random_state
from ..utils.validation import check_is_fitted, _deprecate_positional_args
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def _equal_similarities_and_preferences(S, preference):
def all_equal_preferences():
return np.all(preference == preference.flat[0])
def all_equal_similarities():
# Create mask to ignore diagonal of S
mask = np.ones(S.shape, dtype=bool)
np.fill_diagonal(mask, 0)
return np.all(S[mask].flat == S[mask].flat[0])
return all_equal_preferences() and all_equal_similarities()
@_deprecate_positional_args
def affinity_propagation(S, *, preference=None, convergence_iter=15,
max_iter=200, damping=0.5, copy=True, verbose=False,
return_n_iter=False, random_state='warn'):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like of shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, default=200
Maximum number of iterations
damping : float, default=0.5
Damping factor between 0.5 and 1.
copy : bool, default=True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : bool, default=False
The verbosity level
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
random_state : int or RandomState instance, default=0
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Returns
-------
cluster_centers_indices : ndarray of shape (n_clusters,)
indices of cluster centers
labels : ndarray of shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
When the algorithm does not converge, it returns an empty array as
``cluster_center_indices`` and ``-1`` as label for each training sample.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
preference = np.array(preference)
if (n_samples == 1 or
_equal_similarities_and_preferences(S, preference)):
# It makes no sense to run the algorithm in this case, so return 1 or
# n_samples clusters, depending on preferences
warnings.warn("All samples have mutually equal similarities. "
"Returning arbitrary cluster center(s).")
if preference.flat[0] >= S.flat[n_samples - 1]:
return ((np.arange(n_samples), np.arange(n_samples), 0)
if return_n_iter
else (np.arange(n_samples), np.arange(n_samples)))
else:
return ((np.array([0]), np.array([0] * n_samples), 0)
if return_n_iter
else (np.array([0]), np.array([0] * n_samples)))
if random_state == 'warn':
warnings.warn(("'random_state' has been introduced in 0.23. "
"It will be set to None starting from 0.25 which "
"means that results will differ at every function "
"call. Set 'random_state' to None to silence this "
"warning, or to 0 to keep the behavior of versions "
"<0.23."),
FutureWarning)
random_state = 0
random_state = check_random_state(random_state)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
never_converged = False
if verbose:
print("Converged after %d iterations." % it)
break
else:
never_converged = True
if verbose:
print("Did not converge")
I = np.flatnonzero(E)
K = I.size # Identify exemplars
if K > 0 and not never_converged:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
warnings.warn("Affinity propagation did not converge, this model "
"will not have any cluster centers.", ConvergenceWarning)
labels = np.array([-1] * n_samples)
cluster_centers_indices = []
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
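# Added usage sketch (not part of the original source): the functional API
# above expects a similarity matrix S.  A common choice is the negative
# squared euclidean distance, which is also what the estimator below builds
# internally when affinity='euclidean'.
#
#     >>> X = np.array([[1., 2.], [1., 4.], [4., 2.], [4., 4.]])
#     >>> S = -euclidean_distances(X, squared=True)
#     >>> centers_idx, labels = affinity_propagation(S, random_state=0)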
###############################################################################
class AffinityPropagation(ClusterMixin, BaseEstimator):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, default=0.5
Damping factor (between 0.5 and 1) is the extent to
which the current value is maintained relative to
incoming values (weighted 1 - damping). This is in order
to avoid numerical oscillations when updating these
values (messages).
max_iter : int, default=200
Maximum number of iterations.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
copy : bool, default=True
Make a copy of input data.
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, i.e. of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : {'euclidean', 'precomputed'}, default='euclidean'
Which affinity to use. At the moment 'precomputed' and
``euclidean`` are supported. 'euclidean' uses the
negative squared euclidean distance between points.
verbose : bool, default=False
Whether to be verbose.
random_state : int or RandomState instance, default=0
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Attributes
----------
cluster_centers_indices_ : ndarray of shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : ndarray of shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : ndarray of shape (n_samples,)
Labels of each point
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
When ``fit`` does not converge, ``cluster_centers_`` becomes an empty
array and all training samples will be labelled as ``-1``. In addition,
``predict`` will then label every sample as ``-1``.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, ``fit`` will result in
a single cluster center and label ``0`` for every sample. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
Examples
--------
>>> from sklearn.cluster import AffinityPropagation
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> clustering = AffinityPropagation(random_state=5).fit(X)
>>> clustering
AffinityPropagation(random_state=5)
>>> clustering.labels_
array([0, 0, 0, 1, 1, 1])
>>> clustering.predict([[0, 0], [4, 4]])
array([0, 1])
>>> clustering.cluster_centers_
array([[1, 2],
[4, 2]])
"""
@_deprecate_positional_args
def __init__(self, *, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False, random_state='warn'):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
self.random_state = random_state
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
"""Fit the clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
if self.affinity == "precomputed":
accept_sparse = False
else:
accept_sparse = 'csr'
X = self._validate_data(X, accept_sparse=accept_sparse)
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, preference=self.preference,
max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True,
random_state=self.random_state)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
check_is_fitted(self)
X = check_array(X)
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
if self.cluster_centers_.shape[0] > 0:
return pairwise_distances_argmin(X, self.cluster_centers_)
else:
warnings.warn("This model does not have any cluster centers "
"because affinity propagation did not converge. "
"Labeling every sample as '-1'.", ConvergenceWarning)
return np.array([-1] * X.shape[0])
def fit_predict(self, X, y=None):
"""Fit the clustering from features or affinity matrix, and return
cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
return super().fit_predict(X, y)
| bsd-3-clause |
Housebeer/Natural-Gas-Model | Data Analytics/Fitting+curve.py | 1 | 3566 |
# coding: utf-8
# # Fitting curve to data
# Within this notebook we do some data analytics on historical data to feed some real numbers into the model. Since we assume the consumer demand data to resemble a sine wave, because demand is seasonal, we will focus on fitting the data to this kind of curve.
# In[69]:
import numpy as np
from scipy.optimize import leastsq
import pylab as plt
import pandas as pd
N = 1000 # number of data points
t = np.linspace(0, 4*np.pi, N)
data = 3.0*np.sin(t+0.001) + 0.5 + np.random.randn(N) # create artificial data with noise
guess_mean = np.mean(data)
guess_std = 3*np.std(data)/(2**0.5)
guess_phase = 0
# we'll use this to plot our first estimate. This might already be good enough for you
data_first_guess = guess_std*np.sin(t+guess_phase) + guess_mean
# Define the function to optimize, in this case, we want to minimize the difference
# between the actual data and our "guessed" parameters
optimize_func = lambda x: x[0]*np.sin(t+x[1]) + x[2] - data
est_std, est_phase, est_mean = leastsq(optimize_func, [guess_std, guess_phase, guess_mean])[0]
# recreate the fitted curve using the optimized parameters
data_fit = est_std*np.sin(t+est_phase) + est_mean
plt.plot(data, '.')
plt.plot(data_fit, label='after fitting')
plt.plot(data_first_guess, label='first guess')
plt.legend()
plt.show()
# ## import data for our model
# This is data imported from the CBS StatLine web portal.
# In[70]:
importfile = 'CBS Statline Gas Usage.xlsx'
df = pd.read_excel(importfile, sheetname='Month', skiprows=1)
df.drop(['Onderwerpen_1', 'Onderwerpen_2', 'Perioden'], axis=1, inplace=True)
df
# In[71]:
# transpose
df = df.transpose()
# In[72]:
new_header = df.iloc[0]
df = df[1:]
df.rename(columns = new_header, inplace=True)
# In[73]:
df
# In[74]:
df['Via regionale netten'].plot()
plt.show()
# In[99]:
#print(data)
N = 84
t = np.linspace(1, 84, N)
b = 603
m = 3615
data = b + m*(.5 * (1 + np.cos((t/6)*np.pi))) + 100*np.random.randn(N) # create artificial data with noise
#print(t)
print(type(data[0]))
print(data)
plt.plot(t, data)
plt.show()
#print(est_std, est_phase, est_mean)
guess_std = 3*np.std(data)/(2**0.5)
print(guess_std)
data2 = df['Via regionale netten'].values
data3 = np.array(data2)
data3.astype(np.float64)
print(type(data3[0]))
print(data2)
print((len(data2)))
# In[102]:
#b = self.base_demand
#m = self.max_demand
#y = b + m * (.5 * (1 + np.cos((x/6)*np.pi)))
b = 603
m = 3615
N = 84 # number of data points
t = np.linspace(1, 84, N)
#data = b + m*(.5 * (1 + np.cos((t/6)*np.pi))) + 100*np.random.randn(N) # create artificial data with noise
#data = df['Via regionale netten'].values
data = data3
guess_mean = np.mean(data)
guess_std = 3*np.std(data)/(2**0.5)
guess_phase = 0
# we'll use this to plot our first estimate. This might already be good enough for you
data_first_guess = guess_mean + guess_std*(.5 * (1 + np.cos((t/6)*np.pi + guess_phase)))
# Define the function to optimize, in this case, we want to minimize the difference
# between the actual data and our "guessed" parameters
optimize_func = lambda x: x[0]*(.5 * (1 + np.cos((t/6)*np.pi+x[1]))) + x[2] - data
est_std, est_phase, est_mean = leastsq(optimize_func, [guess_std, guess_phase, guess_mean])[0]
# recreate the fitted curve using the optimized parameters
data_fit = est_mean + est_std*(.5 * (1 + np.cos((t/6)*np.pi + est_phase)))
plt.plot(data, '.')
plt.plot(data_fit, label='after fitting')
plt.plot(data_first_guess, label='first guess')
plt.legend()
plt.show()
print(est_std, est_phase, est_mean)
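# Added sketch (not part of the original notebook): a quick goodness-of-fit
# check for the fitted seasonal curve, using `data` and `data_fit` from the
# cell above.
residuals = data - data_fit
rmse = np.sqrt(np.mean(residuals**2))
print('RMSE of seasonal fit:', rmse)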
# In[ ]:
| mit |
bhargavasana/synthpop | synthpop/test/test_censushelpers.py | 3 | 2823 | import pytest
from ..census_helpers import Census
import numpy as np
from pandas.util.testing import assert_series_equal
@pytest.fixture
def c():
return Census("827402c2958dcf515e4480b7b2bb93d1025f9389")
def test_block_group_and_tract_query(c):
income_columns = ['B19001_0%02dE' % i for i in range(1, 18)]
vehicle_columns = ['B08201_0%02dE' % i for i in range(1, 7)]
workers_columns = ['B08202_0%02dE' % i for i in range(1, 6)]
families_columns = ['B11001_001E', 'B11001_002E']
block_group_columns = income_columns + families_columns
tract_columns = vehicle_columns + workers_columns
df = c.block_group_and_tract_query(block_group_columns,
tract_columns, "06", "075",
merge_columns=['tract', 'county',
'state'],
block_group_size_attr="B11001_001E",
tract_size_attr="B08201_001E",
tract="030600")
assert len(df) == 3
assert_series_equal(df["B11001_001E"], df["B08201_001E"])
assert np.all(df.state == "06")
assert np.all(df.county == "075")
df = c.block_group_and_tract_query(block_group_columns,
tract_columns, "06", "075",
merge_columns=['tract', 'county',
'state'],
block_group_size_attr="B11001_001E",
tract_size_attr="B08201_001E",
tract=None)
# number of block groups in San Francisco
assert len(df) == 581
assert_series_equal(df["B11001_001E"], df["B08201_001E"])
assert np.all(df.state == "06")
assert np.all(df.county == "075")
def test_wide_block_group_query(c):
population = ['B01001_001E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
all_columns = population + sex + race + male_age_columns + \
female_age_columns
df = c.block_group_query(all_columns, "06", "075", tract="030600")
assert len(df) == 3
assert np.all(df.state == "06")
assert np.all(df.county == "075")
assert len(df.columns) > 50
def test_tract_to_puma(c):
puma = c.tract_to_puma("06", "075", "030600")
assert puma == "07506"
def test_download_pums(c):
puma = "07506"
c.download_population_pums("06", puma)
c.download_household_pums("06", puma)
c.download_population_pums("10")
c.download_household_pums("10")
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_source_space_time_frequency.py | 15 | 2319 | """
===================================================
Compute induced power in the source space with dSPM
===================================================
Returns STC files ie source estimates of induced power
for different bands in the source space. The inverse method
is linear based on dSPM inverse operator.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_band_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax, event_id = -0.2, 0.5, 1
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
events = events[:10] # take 10 events to keep the computation time low
# Use linear detrend to reduce any edge artifacts
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
preload=True, detrend=1)
# Compute a source estimate per frequency band
bands = dict(alpha=[9, 11], beta=[18, 22])
stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
use_fft=False, n_jobs=1)
for b, stc in stcs.items():
stc.save('induced_power_%s' % b)
###############################################################################
# plot mean power
plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
plt.xlabel('Time (ms)')
plt.ylabel('Power')
plt.legend()
plt.title('Mean source induced power')
plt.show()
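# Added sketch (not part of the original example): the band-limited source
# estimates saved above can be read back later from the same filename stem,
# assuming the default naming used by ``stc.save``:
#
#     >>> stc_alpha = mne.read_source_estimate('induced_power_alpha')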
| bsd-3-clause |
pbrod/scipy | scipy/stats/_discrete_distns.py | 13 | 22227 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, logsumexp, betaln, gammaln as gamln
from scipy._lib._numpy_compat import broadcast_to
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
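# Added illustration (not part of the original source): the pmf implemented
# above agrees with the closed form choose(n, k) * p**k * (1-p)**(n-k).
#
#     >>> k, n, p = 3, 10, 0.4
#     >>> closed_form = special.comb(n, k) * p**k * (1 - p)**(n - k)
#     >>> bool(np.isclose(binom.pmf(k, n, p), closed_form))
#     True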
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Negative binomial distribution describes a sequence of i.i.d. Bernoulli
trials, repeated until a predefined, non-random number of successes occurs.
The probability mass function of the number of failures for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters where n is the number of
successes, whereas p is the probability of a single success.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
r"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
`M` is the total number of objects, `n` is total number of Type I objects.
The random variate represents the number of Type I objects in `N` drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not
universally accepted. See the Examples for a clarification of the
definitions used here.
The probability mass function is defined as,
.. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}}{\binom{M}{N}}
for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial
coefficients are defined as,
.. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}.
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = np.maximum(N-(M-n), 0)
self.b = np.minimum(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return betaln(good+1, 1) + betaln(bad+1,1) + betaln(tot-N+1, N+1)\
- betaln(k+1, good-k+1) - betaln(N-k+1,bad-N+k+1)\
- betaln(tot+1, 1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / special.log1p(-p)
def _stats(self, p):
r = special.log1p(-p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
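# Added illustration (not part of the original source): for k = 0 the pmf
# above reduces to exp(-mu), since mu**0 / 0! == 1.
#
#     >>> mu = 2.5
#     >>> bool(np.isclose(poisson.pmf(0, mu), np.exp(-mu)))
#     True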
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
self.a = np.where(lambda_ > 0, 0, -np.inf)
self.b = np.where(lambda_ > 0, np.inf, 0)
return lambda_ != 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high):
"""An array of *size* random integers >= ``low`` and < ``high``."""
if self._size is not None:
# Numpy's RandomState.randint() doesn't broadcast its arguments.
# Use `broadcast_to()` to extend the shapes of low and high
# up to self._size. Then we can use the numpy.vectorize'd
# randint without needing to pass it a `size` argument.
low = broadcast_to(low, self._size)
high = broadcast_to(high, self._size)
randint = np.vectorize(self._random_state.randint, otypes=[np.int_])
return randint(low, high)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
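# Added illustration (not part of the original source): the moments computed
# in _stats above give mean mu1 - mu2 and variance mu1 + mu2.
#
#     >>> m, v = skellam.stats(3.0, 1.0, moments='mv')
#     >>> float(m), float(v)
#     (2.0, 4.0)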
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
alivecor/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 37 | 3774 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Convert the labels to a one-hot tensor of shape (length of features, 3)
# with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
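# Added note (not part of the original example): with the settings above,
# tf.train.exponential_decay (staircase=False) follows
# learning_rate * decay_rate ** (global_step / decay_steps), i.e. roughly:
#
#     def decayed_lr(step, lr=0.1, decay_rate=0.001, decay_steps=100):
#         return lr * decay_rate ** (step / decay_steps)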
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
rajat1994/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
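# Added illustration (not part of the original source): in 'hard' voting the
# prediction reduces to a weighted bincount over the labels returned by the
# individual classifiers, as in predict() above.
#
#     >>> votes = np.array([0, 1, 1])      # labels from three classifiers
#     >>> weights = [3, 1, 1]
#     >>> int(np.argmax(np.bincount(votes, weights=weights)))
#     0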
| bsd-3-clause |
FrancoisRheaultUS/dipy | dipy/tests/test_scripts.py | 8 | 5108 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
Run scripts and check outputs
"""
"""
import glob
import os
import shutil
from os.path import (dirname, join as pjoin, abspath)
from dipy.testing import assert_true, assert_false
import numpy.testing as npt
import pytest
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.data import get_fnames
# Quickbundles command-line requires matplotlib:
try:
import matplotlib
no_mpl = False
except ImportError:
no_mpl = True
from dipy.tests.scriptrunner import ScriptRunner
runner = ScriptRunner(
script_sdir='bin',
debug_print_var='NIPY_DEBUG_PRINT')
run_command = runner.run_command
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
npt.assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
npt.assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
npt.assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
npt.assert_equal(image.shape, shape)
    npt.assert_array_almost_equal(image.affine, affine)
def test_dipy_fit_tensor_again():
with InTemporaryDirectory():
dwi, bval, bvec = get_fnames("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
npt.assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.affine
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory():
dwi, bval, bvec = get_fnames("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor",
"--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
npt.assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.affine
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
@pytest.mark.skipif(no_mpl, reason="Requires matplotlib")
def test_qb_commandline():
with InTemporaryDirectory():
tracks_file = get_fnames('fornix')
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', 'tracks300.trk']
out = run_command(cmd)
npt.assert_equal(out[0], 0)
@pytest.mark.skipif(no_mpl, reason="Requires matplotlib")
def test_qb_commandline_output_path_handling():
with InTemporaryDirectory():
# Create temporary subdirectory for input and for output
os.mkdir('work')
os.mkdir('output')
os.chdir('work')
tracks_file = get_fnames('fornix')
# Need to specify an output directory with a "../" style path
# to trigger old bug.
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', os.path.join('..', 'output', 'tracks300.trk')]
out = run_command(cmd)
npt.assert_equal(out[0], 0)
# Make sure the files were created in the output directory
os.chdir('../')
output_files_list = glob.glob('output/tracks300_*.trk')
assert_true(output_files_list)
"""
| bsd-3-clause |
3manuek/scikit-learn | sklearn/utils/multiclass.py | 92 | 13986 | # Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-sequences': _unique_sequence_of_sequence,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %r" % ys)
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
""" Check if ``y`` is in the label indicator matrix format (multilabel).
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a label indicator matrix format,
else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_label_indicator_matrix
>>> is_label_indicator_matrix([0, 1, 0, 1])
False
>>> is_label_indicator_matrix([[1], [0, 2], []])
False
>>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
True
>>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
False
>>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def is_sequence_of_sequences(y):
""" Check if ``y`` is in the sequence of sequences format (multilabel).
This format is DEPRECATED.
Parameters
----------
y : sequence or array.
Returns
-------
out : bool,
Return ``True``, if ``y`` is a sequence of sequences else ``False``.
"""
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
try:
if hasattr(y, '__array__'):
y = np.asarray(y)
out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types))
except (IndexError, TypeError):
return False
if out:
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
return out
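# Example of the deprecated format (added for illustration, mirroring the
# doctest style used elsewhere in this module):
#
#     >>> is_sequence_of_sequences([[1], [0, 2], []])  # nested label lists
#     True
#     >>> is_sequence_of_sequences([0, 1, 0, 1])       # flat label vector
#     False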
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
    Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-sequences': `y` is a sequence of sequences, a 1d
array-like of objects that are sequences of labels.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_sequence_of_sequences(y):
return 'multilabel-sequences'
elif is_label_indicator_matrix(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# known to fail in numpy 1.3 for array of arrays
return 'unknown'
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown'
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown'
elif y.ndim == 2 and y.shape[1] > 1:
suffix = '-multioutput'
else:
# column vector or 1d
suffix = ''
# check float and contains non-integer float values:
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
return 'continuous' + suffix
if len(np.unique(y)) <= 2:
assert not suffix, "2d binary array-like should be multilabel"
return 'binary'
else:
return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
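# Sketch of the intended call pattern inside an estimator's ``partial_fit``
# (added for illustration; ``self.coef_`` and ``n_features`` are hypothetical
# names, not part of this module):
#
#     def partial_fit(self, X, y, classes=None):
#         if _check_partial_fit_first_call(self, classes):
#             # first call: ``self.classes_`` has just been set, allocate state
#             self.coef_ = np.zeros((len(self.classes_), n_features))
#         # ...incremental update of the existing state...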
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
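# Worked example (added for illustration):
#
#     y = np.array([[1, 0], [2, 0], [2, 3]])
#     classes, n_classes, priors = class_distribution(y)
#     # classes   -> [array([1, 2]), array([0, 3])]
#     # n_classes -> [2, 2]
#     # priors    -> [array([1/3, 2/3]), array([2/3, 1/3])]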
| bsd-3-clause |
xyguo/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 41 | 4827 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
avoorhis/mbl_sequencing_pipeline | pipeline/fastalib.py | 2 | 8262 | # -*- coding: utf-8 -*-
# v.010112
# Copyright (C) 2011, Marine Biological Laboratory
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the docs/COPYING file.
import os
import sys
import numpy
import hashlib
class FastaOutput:
def __init__(self, output_file_path):
self.output_file_path = output_file_path
self.output_file_obj = open(output_file_path, 'w')
def store(self, entry, split = True):
if entry.unique:
self.write_id('%s|%s' % (entry.id, 'frequency:%d' % len(entry.ids)))
else:
self.write_id(entry.id)
self.write_seq(entry.seq, split)
def write_id(self, id):
self.output_file_obj.write('>%s\n' % id)
def write_seq(self, seq, split = True):
if split:
seq = self.split(seq)
self.output_file_obj.write('%s\n' % seq)
def split(self, sequence, piece_length = 80):
ticks = range(0, len(sequence), piece_length) + [len(sequence)]
return '\n'.join([sequence[ticks[x]:ticks[x + 1]] for x in range(0, len(ticks) - 1)])
def close(self):
self.output_file_obj.close()
class ReadFasta:
def __init__(self, f_name):
self.ids = []
self.sequences = []
self.fasta = SequenceSource(f_name)
while self.fasta.next():
if self.fasta.pos % 1000 == 0 or self.fasta.pos == 1:
sys.stderr.write('\r[fastalib] Reading FASTA into memory: %s' % (self.fasta.pos))
sys.stderr.flush()
self.ids.append(self.fasta.id)
self.sequences.append(self.fasta.seq)
sys.stderr.write('\n')
def close(self):
self.fasta.close()
class SequenceSource:
def __init__(self, fasta_file_path, lazy_init = True, unique = False):
self.fasta_file_path = fasta_file_path
self.name = None
self.lazy_init = lazy_init
self.pos = 0
self.id = None
self.seq = None
self.ids = []
self.unique = unique
self.unique_hash_dict = {}
self.unique_hash_list = []
self.unique_next_hash = 0
self.file_pointer = open(self.fasta_file_path)
self.file_pointer.seek(0)
if self.lazy_init:
self.total_seq = None
else:
self.total_seq = len([l for l in self.file_pointer.readlines() if l.startswith('>')])
self.reset()
if self.unique:
self.init_unique_hash()
def init_unique_hash(self):
while self.next_regular():
hash = hashlib.sha1(self.seq).hexdigest()
if hash in self.unique_hash_dict:
self.unique_hash_dict[hash]['ids'].append(self.id)
self.unique_hash_dict[hash]['count'] += 1
else:
self.unique_hash_dict[hash] = {'id' : self.id,
'ids': [self.id],
'seq': self.seq,
'count': 1}
self.unique_hash_list = [i[1] for i in sorted([(self.unique_hash_dict[hash]['count'], hash)\
for hash in self.unique_hash_dict], reverse = True)]
self.total_seq = len(self.unique_hash_dict)
self.reset()
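    # Usage sketch for the ``unique`` mode (added for illustration; the file
    # name is hypothetical):
    #
    #     fasta = SequenceSource('reads.fa', unique=True)
    #     while fasta.next():
    #         print '%s seen %d times' % (fasta.id, len(fasta.ids))
    #
    # Entries come back most-abundant first because unique_hash_list is
    # sorted by count in init_unique_hash above.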
def next(self):
if self.unique:
return self.next_unique()
else:
return self.next_regular()
def next_unique(self):
if self.unique:
if self.total_seq > 0 and self.pos < self.total_seq:
hash_entry = self.unique_hash_dict[self.unique_hash_list[self.pos]]
self.pos += 1
self.seq = hash_entry['seq']
self.id = hash_entry['id']
self.ids = hash_entry['ids']
return True
else:
return False
else:
return False
def next_regular(self):
self.seq = None
self.id = self.file_pointer.readline()[1:].strip()
sequence = ''
while 1:
line = self.file_pointer.readline()
if not line:
if len(sequence):
self.seq = sequence
return True
else:
return False
if line.startswith('>'):
self.file_pointer.seek(self.file_pointer.tell() - len(line))
break
sequence += line.strip()
self.seq = sequence
self.pos += 1
return True
def close(self):
self.file_pointer.close()
def reset(self):
self.pos = 0
self.id = None
self.seq = None
self.ids = []
self.file_pointer.seek(0)
def visualize_sequence_length_distribution(self, title, dest = None, max_seq_len = None, xtickstep = None, ytickstep = None):
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
sequence_lengths = []
self.reset()
while self.next():
if self.pos % 10000 == 0 or self.pos == 1:
sys.stderr.write('\r[fastalib] Reading: %s' % (self.pos))
sys.stderr.flush()
sequence_lengths.append(len(self.seq))
self.reset()
sys.stderr.write('\n')
if not max_seq_len:
max_seq_len = max(sequence_lengths) + (int(max(sequence_lengths) / 100.0) or 10)
seq_len_distribution = [0] * (max_seq_len + 1)
for l in sequence_lengths:
seq_len_distribution[l] += 1
fig = plt.figure(figsize = (16, 12))
plt.rcParams.update({'axes.linewidth' : 0.9})
plt.rc('grid', color='0.50', linestyle='-', linewidth=0.1)
gs = gridspec.GridSpec(10, 1)
ax1 = plt.subplot(gs[0:8])
plt.grid(True)
plt.subplots_adjust(left=0.05, bottom = 0.03, top = 0.95, right = 0.98)
plt.plot(seq_len_distribution, color = 'black', alpha = 0.3)
plt.fill_between(range(0, max_seq_len + 1), seq_len_distribution, y2 = 0, color = 'black', alpha = 0.15)
plt.ylabel('number of sequences')
plt.xlabel('sequence length')
if xtickstep == None:
xtickstep = (max_seq_len / 50) or 1
if ytickstep == None:
ytickstep = max(seq_len_distribution) / 20 or 1
plt.xticks(range(xtickstep, max_seq_len + 1, xtickstep), rotation=90, size='xx-small')
plt.yticks(range(0, max(seq_len_distribution) + 1, ytickstep),
[y for y in range(0, max(seq_len_distribution) + 1, ytickstep)],
size='xx-small')
plt.xlim(xmin = 0, xmax = max_seq_len)
plt.ylim(ymin = 0, ymax = max(seq_len_distribution) + (max(seq_len_distribution) / 20.0))
plt.figtext(0.5, 0.96, '%s' % (title), weight = 'black', size = 'xx-large', ha = 'center')
ax1 = plt.subplot(gs[9])
plt.rcParams.update({'axes.edgecolor' : 20})
plt.grid(False)
plt.yticks([])
plt.xticks([])
plt.text(0.02, 0.5, 'total: %s / mean: %.2f / std: %.2f / min: %s / max: %s'\
% (len(sequence_lengths),
numpy.mean(sequence_lengths), numpy.std(sequence_lengths),\
min(sequence_lengths),\
max(sequence_lengths)),\
va = 'center', alpha = 0.8, size = 'x-large')
if dest == None:
dest = self.fasta_file_path
try:
plt.savefig(dest + '.tiff')
except:
plt.savefig(dest + '.png')
try:
plt.show()
except:
pass
return
if __name__ == '__main__':
fasta = SequenceSource(sys.argv[1])
output = FastaOutput(sys.argv[1] + 'out.fa')
fasta.visualize_sequence_length_distribution(title = sys.argv[2] if len(sys.argv) == 3 else 'None')
while fasta.next():
output.store(fasta)
output.close() | gpl-2.0 |
iamblusky/Led_Tracking | src/picam_tracker_py/scripts/picam_timer.py | 2 | 6348 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on July 31 09:51:20 2014
@author: Mathieu Garon
@email: [email protected]
"""
import roslib; roslib.load_manifest('picam_tracker_py')
import rospy
from sensor_msgs.msg import Image
import std_srvs.srv
import cv2
import picamera
import wiringpi2 as gpio
import os
import io
import numpy as np
import threading
import time
import image_processor as proc
import sharedGlobals as sg
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class picam_tester:
def __init__(self):
try:
self.picam = picamera.PiCamera()
except:
rospy.logfatal("Check if the Picam is free or installed")
rospy.Service('test_camera',std_srvs.srv.Empty,self.test_cb)
rospy.Service('gaussian_background_modeling',std_srvs.srv.Empty,self.gauss_BG_model_cb)
rospy.Service('correlation',std_srvs.srv.Empty,self.correlation_cb)
self._init_picamera()
self._init_led()
#datas:
self.mu = []
self.sig = []
self._flash_led(nflash=4)
rospy.loginfo("Picam_tester ready...")
rospy.spin()
def __del__(self):
self.picam.close()
def correlation_cb(self,req):
rospy.loginfo("Correlation")
w=1296
h=730
d=50
template_signal =np.array([1,1,0,0,1,1,0,0,1,1,1,1,0,0,0,0])
with sg.VAR_LOCK:
sg.CORR_DATA = proc.rectangleBuffers(25,len(template_signal),[[250,200,200,200]]) #[[500,200,300,300]])
corr_thread = proc.TimeCorrelation(template_signal)
video_fps = self._process_video(proc.Correlation,w,h,d,processors = 3)
self._get_chunk_time()
with sg.VAR_LOCK:
corr_thread.terminated = True
corr_thread.join()
self._empty_pool()
self._reset_globals()
#self._save_video(filename='correlation.avi',fps=video_fps)
rospy.loginfo("Save Plot")
plt.plot(sg.PLOT)
plt.plot(sg.PLOT2)
plt.savefig('/home/CameraNetwork/July/plot.jpg')
cv2.imwrite('/home/CameraNetwork/July/image.jpg',sg.PICTURE)
rospy.loginfo("Ending service.")
return []
def gauss_BG_model_cb(self,req):
rospy.loginfo("Background Gaussian Modeling")
self._process_video(proc.GrayFrameCapture)
with sg.VAR_LOCK:
self._empty_pool()
self._reset_globals()
Matrix = sg.VIDEO_MATRIX[0:15].astype(np.uint16)
d,h,w = Matrix.shape
rospy.loginfo("Starting modelisation")
self.mu = sum(Matrix)/d
std = Matrix-self.mu
self.sig = np.sqrt(sum(std*std)/d)
rospy.loginfo("normfit done...")
sg.MU = self.mu.astype(np.float32)
sg.SIG = self.sig.astype(np.float32)
#substract part:
rospy.loginfo("Starting background substraction...")
video_fps = self._process_video(proc.BackgroundSubstraction,depth=30,processors=2)
self._get_chunk_time()
with sg.VAR_LOCK:
self._empty_pool()
self._reset_globals()
self._save_video(filename='bg_substraction.avi',fps=video_fps)
rospy.loginfo("Ending service.")
return []
def test_cb(self,req):
rospy.loginfo("Begin Tests!")
video_fps = self._process_video(proc.TestImageProcessor)
totalTime = 0
self._get_chunk_time()
with sg.VAR_LOCK:
            self._empty_pool()
self._reset_globals()
        self._save_video(fps=video_fps)
rospy.loginfo("Ending service.")
return []
def _process_video(self,procClass,width=1296,heigth=730,depth=50, processors=4):
gpio.digitalWrite(self.led,True)
with sg.VAR_LOCK:
#yuv : convert width and height to fit with yuv format
sg._WIDTH = (width+31)//32*32
sg._HEIGTH = (heigth+15)//16*16
sg._DEPTH = depth
sg.VIDEO_MATRIX = np.zeros([sg._DEPTH + 1,sg._HEIGTH,sg._WIDTH],np.uint8)
sg.POOL = [procClass() for i in range(processors)]
self.picam.resolution = (width,heigth)
self.picam.framerate = 90
rospy.sleep(1)
startTime = rospy.get_rostime()
self.picam.capture_sequence(proc.streams(),'yuv',use_video_port=True)
gpio.digitalWrite(self.led,False)
deltaTime = rospy.get_rostime() - startTime
fps = depth/deltaTime.to_sec()
rospy.loginfo("Capture : " + str(fps) + " fps.")
return fps
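    # Note added for clarity: the capture size is rounded up to the next
    # multiple of 32 (width) and 16 (height) to match the YUV buffer layout,
    # so the default request of 1296x730 is stored as 1312x736 in VIDEO_MATRIX
    # while the camera itself is configured with the requested resolution.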
def _save_video(self,filename='test.avi',fps=20):
rospy.loginfo("Saving Video...")
filename = '/home/CameraNetwork/July/' + filename
video =cv2.VideoWriter(filename,cv2.cv.CV_FOURCC('M','J','P','G'),fps,
(sg._WIDTH,sg._HEIGTH),isColor = False)
for i in sg.VIDEO_MATRIX:
video.write(i)
video.release()
def _empty_pool(self):
rospy.loginfo("Terminating Threads...")
while sg.POOL:
processor = sg.POOL.pop()
processor.terminated = True
processor.join()
def _get_chunk_time(self):
totalTime = 0
for i in sg.TIME_LIST:
totalTime += i.to_sec()
rospy.loginfo("the chunk takes " + str(totalTime/len(sg.TIME_LIST)) + " sec")
def _reset_globals(self):
sg.POOL = []
sg.FRAME_COUNT = 0
sg.DONE = False
sg.TIME_LIST = []
#proc.VIDEO_MATRIX = np.zeros([_DEPTH + 1,_HEIGTH,_WIDTH],np.uint8)
def _init_picamera(self):
self.picam.exposure_mode = 'fixedfps'
self.picam.awb_mode = 'off'
self.picam.awb_gains = 1.4
#self.picam.resolution = (1296,972)
#self.picam.framerate = 40
def _init_led(self):
self.led = 5
os.system("gpio export " + str(self.led) + " out")
if gpio.wiringPiSetupSys() != 0:
rospy.logfatal("Unable to setup gpio")
gpio.digitalWrite(self.led,False)
def _flash_led(self, nflash=1, delay=0.1):
for n in range(nflash):
gpio.digitalWrite(self.led,True)
rospy.sleep(delay)
gpio.digitalWrite(self.led,False)
rospy.sleep(delay)
if __name__ == "__main__":
rospy.init_node('picam_tester')
server = picam_tester();
| bsd-2-clause |
mohseniaref/PySAR-1 | pysar/info.py | 1 | 5619 | #! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
#
# Add 'coherence' option, Yunjun, Jul 2015
#
import sys
import os
from numpy import std
#import matplotlib.pyplot as plt
import h5py
import datetime
import time
def Usage():
print '''
***************************************************************
***************************************************************
  Displays the general information of the PySAR product h5 file.
Usage:
info.py file.h5
example:
info.py timeseries.h5
info.py velocity_demCor_masked.h5
info.py temporal_coherence.h5
info.py Loaded_igrams.h5
info.py Loaded_igrams.h5 3
info.py Coherence_KyushuT424AlosA.h5
***************************************************************
***************************************************************
'''
def main(argv):
try:
File=argv[0]
except:
Usage();sys.exit(1)
h5file=h5py.File(File,'r')
k=h5file.keys()
print '******************************************'
print '******************************************'
print 'PySAR'
print '**********************'
print 'File contains: '+ k[0]
print '**********************'
if len(k)==1 and k[0] in ('velocity','temporal_coherence','rmse','mask'):
try:
h5file[k[0]].attrs['X_FIRST']
print 'coordinates : GEO'
except:
print 'coordinates : radar'
print '**********************'
print 'Attributes:'
print''
for key , value in h5file[k[0]].attrs.iteritems():
print key + ' : ' + str(value)
elif 'timeseries' in k:
try:
h5file[k[0]].attrs['X_FIRST']
print 'coordinates : GEO'
except:
print 'coordinates : radar'
print '**********************'
dateList = h5file['timeseries'].keys()
print 'Number of epochs: '+str(len(dateList))
print 'Start Date: '+dateList[0]
print 'End Date: '+dateList[-1]
print '**********************'
print 'List of the dates:'
print dateList
print '**********************'
print 'List of the dates in years'
t=[]
for i in range(len(dateList)):
ti=(datetime.datetime(*time.strptime(dateList[i],"%Y%m%d")[0:5]))
tt = ti.timetuple()
ty=tt.tm_year + (tt.tm_mon-1)/12.0 + tt.tm_mday/365.0
t.append(ty)
print t
print '*****************************************'
    print 'Standard deviation of acquisition times :'
print str(std(t)) + ' years'
print '**********************'
print 'Attributes:'
print''
for key,value in h5file['timeseries'].attrs.iteritems():
print key + ' : ' + str(value)
print '*****************************************'
print 'All groups in this file:'
print k
elif 'interferograms' in k:
ifgramList = h5file['interferograms'].keys()
try:
h5file['interferograms'][ifgramList[0]].attrs['X_FIRST']
print 'coordinates : GEO'
except:
print 'coordinates : radar'
print '**********************'
try:
igramNumber=int(argv[1])
print ifgramList[igramNumber-1]
print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
for key, value in h5file['interferograms'][ifgramList[igramNumber-1]].attrs.iteritems():
print key, value
print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
print ifgramList[igramNumber-1]
except:
print 'Number of interferograms: '+str(len(ifgramList))
print '**********************'
print 'List of the interferogram: eNum'
eNum=0
for ifg in ifgramList:
print ifg + ' ' + str(eNum)
eNum=eNum+1
print '**********************'
print 'File contains: '+ k[0]
print 'Number of interferograms: '+str(len(ifgramList))
print 'All groups in this file:'
print k
for key, value in h5file['interferograms'].attrs.iteritems():
print key, value
elif k[0] in ('coherence','wrapped'):
corList = h5file[k[0]].keys()
try:
h5file[k[0]][corList[0]].attrs['X_FIRST']
print 'coordinates : GEO'
except:
print 'coordinates : radar'
print '**********************'
try:
corNumber=int(argv[1])
print corList[corNumber-1]
print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
for key, value in h5file[k[0]][corList[corNumber-1]].attrs.iteritems():
print key, value
print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
print corList[corNumber-1]
except:
print 'Number of '+k[0]+': '+str(len(corList))
print '**********************'
print 'List of the '+k[0]+': eNum'
eNum=0
for cor in corList:
print cor + ' ' + str(eNum)
eNum=eNum+1
print '**********************'
print 'File contains: '+ k[0]
print 'Number of '+k[0]+': '+str(len(corList))
print 'All groups in this file:'
print k
for key, value in h5file[k[0]].attrs.iteritems():
print key, value
print '******************************************'
print '******************************************'
h5file.close()
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
gclenaghan/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 4 | 39882 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
    # hard-coded regression test - update if the OOB computation changes
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
    # hard-coded regression test - update if the OOB computation changes
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # 10 lines for iterations 1-10, then 9 more for iterations 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
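# Illustrative sketch (not part of the original test suite): the ``monitor``
# callback exercised above can implement custom early stopping by returning
# True once some criterion is met. The helper name and the 1e-4 tolerance
# below are assumptions for illustration only.
def _example_loss_monitor(i, est, locals_):
    # stop once the train loss improves by less than 1e-4 between stages
    scores = est.train_score_
    return i > 0 and abs(scores[i] - scores[i - 1]) < 1e-4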
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
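# Illustrative sketch (not part of the original test suite): gradient boosting
# accepts scipy sparse inputs when presorting is disabled, as exercised by
# check_sparse_input above. The helper name and the toy data are assumptions
# for illustration only.
def _example_sparse_fit():
    X_dense = np.random.RandomState(0).uniform(size=(20, 3))
    y_demo = (X_dense[:, 0] > 0.5).astype(int)
    est = GradientBoostingClassifier(n_estimators=10, presort=False)
    return est.fit(csr_matrix(X_dense), y_demo)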
| bsd-3-clause |
buzmakov/tomography_scripts | misc/analyze_pores.py | 1 | 8046 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %matplotlib inline
# %%
import numpy as np
import pylab as plt
import h5py
from tqdm import tqdm_notebook
from scipy import ndimage as ndi
from scipy.ndimage.morphology import distance_transform_edt
from scipy.ndimage.morphology import binary_closing, binary_fill_holes, binary_opening, binary_dilation, binary_erosion
# from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from skimage.segmentation import watershed, random_walker
from pathlib import Path
import cv2
# %%
data_folderes = ['/diskmnt/b/makov/robotom/a47e0d1d-444c-4647-b9e1-5f0a3d7441b6',
'/diskmnt/b/makov/robotom/cd190221-f7c9-4509-a65e-039097920945',
'/diskmnt/b/makov/robotom/02223e5a-1018-461d-baeb-471daa535d8f']
# %%
df_number = 2
df = Path(data_folderes[df_number])
data = h5py.File(df / 'tomo_rec.h5', 'r')['Reconstruction']
if df_number == 0 :
data = data[210:,300:1000, 200:900]
elif df_number == 1 :
data = data[1850:2400, 400:1600, 400:1700]
elif df_number ==2:
data = data[1790:2320, 580:1300, 500:1260]
# %%
# plt.figure()
# plt.imshow(data[1790:2320, 500:1300, data.shape[2]//2])
# plt.show()
# %%
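# create_mask builds a binary mask of the sample: threshold the reconstruction,
# smooth with a median filter, close and fill holes, then erode one voxel to
# stay inside the object boundary.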
def create_mask(im):
t = im>0.01
t = cv2.medianBlur(t.astype('float32'), 7)
t = binary_fill_holes(binary_closing(t))
t = binary_erosion(t)
return t
# %%
plt.figure(figsize=(10,10))
plt.imshow(data[340], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
plt.colorbar()
plt.show()
plt.figure(figsize=(10,10))
plt.imshow(create_mask(data[340]), cmap=plt.cm.gray_r)
# plt.colorbar()
plt.show()
# %%
# # !rm -rf images
# %%
out_dir = Path('pores')/str(df_number)
out_dir.mkdir(parents=True, exist_ok=True)
# %%
for i in tqdm_notebook(range(data.shape[0])):
plt.imsave(out_dir / f'0_{i}.png',data[i], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
for i in tqdm_notebook(range(data.shape[1])):
plt.imsave(out_dir / f'1_{i}.png',data[:,i,:], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
for i in tqdm_notebook(range(data.shape[2])):
plt.imsave(out_dir / f'2_{i}.png',data[:,:,i], vmin=0.01, vmax=0.1, cmap=plt.cm.gray_r)
# %%
# !ffmpeg -y -r 10 -i "{out_dir}/0_%d.png" -b:v 2000k {out_dir}/poly_0.avi
# !ffmpeg -y -r 10 -i "{out_dir}/1_%d.png" -b:v 2000k {out_dir}/poly_1.avi
# !ffmpeg -y -r 10 -i "{out_dir}/2_%d.png" -b:v 2000k {out_dir}/poly_2.avi
# %%
for i in tqdm_notebook(range(data.shape[0])):
if df_number == 0:
thr = 0.01
    elif df_number == 1:
        thr = 0.05
    else:
        thr = 0.05  # assumption: the original set no threshold for df_number == 2
data[i] = cv2.medianBlur(np.asarray(data[i]>thr, dtype='float32'), 7)
# %%
# plt.figure(figsize=(12,12))
# plt.imshow(cv2.medianBlur(np.asarray(data[300,:,:]>0.05, dtype='float32'), 7))
# plt.show()
# %%
# x = data[200]
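# find_pores segments pore voxels in a single slice: the difference of two
# median filters highlights flat (pore) regions, which are thresholded,
# cleaned with morphological closing/opening and restricted to the sample mask.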
def find_pores(x, debug=False):
x = x.copy()
x[x<0.01] = 0.01
x_m = cv2.medianBlur(np.asarray(x, dtype='float32'), 7)-cv2.medianBlur(np.asarray(x, dtype='float32'), 3)
data_dtf = distance_transform_edt(x>0)
data_dtf_r = distance_transform_edt(x<1)
pores = binary_opening(binary_closing((np.abs(x_m)<0.004)*(x<0.08)))
mask = create_mask(x)
pores = pores*mask
if debug:
plt.figure(figsize=(15,15))
plt.imshow(x)
plt.contour(pores)
# plt.colorbar(orientation='horizontal')
plt.show()
# plt.figure(figsize=(15,15))
# plt.imshow(pores)
# # plt.colorbar(orientation='horizontal')
# plt.show()
# plt.figure(figsize=(15,15))
# plt.imshow(mask)
# # plt.colorbar(orientation='horizontal')
# plt.show()
return pores
# %%
for i in range(70,350, 50):
find_pores(data[i], True)
# %%
pores = data.copy()
for i in tqdm_notebook(range(pores.shape[0])):
pores[i] = find_pores(pores[i])
# %%
pores_t = pores #[200:300, 200:500, 200:500]
# mask_t = mask[200:300, 200:500, 200:500]
pores_dtf = distance_transform_edt(pores_t)
pores_dtf_r = distance_transform_edt(1-pores_t)
# %%
plt.figure(figsize=(15,15))
plt.imshow(pores_dtf[50])
plt.colorbar(orientation='horizontal')
plt.show()
# plt.figure(figsize=(15,15))
# plt.imshow(pores_dtf_r[50]*binary_erosion(mask, iterations=20), vmax=5)
# plt.colorbar(orientation='horizontal')
# plt.show()
plt.figure(figsize=(15,15))
plt.imshow(pores_dtf_r[50], vmax=5)
plt.colorbar(orientation='horizontal')
plt.show()
# %%
# # #https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_watershed.html#sphx-glr-auto-examples-segmentation-plot-watershed-py
# local_maxi = peak_local_max(pores_dtf, indices=False,
# threshold_abs=2, min_distance=10,# footprint=np.ones((3, 3, 3)),
# labels=pores_t)#
# markers, num_features = ndi.label(local_maxi)#, np.ones((3, 3, 3)))
# labels = watershed(-pores_dtf, markers, mask=pores_t)
# %%
#https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_watershed.html#sphx-glr-auto-examples-segmentation-plot-watershed-py
# pores_t = pores[200:300, 200:500, 200:500]
# local_maxi = peak_local_max(pores_dtf, indices=False, min_distance=3)#, footprint=np.ones((3, 3, 3)))
# markers, num_features = ndi.label(pores_t)
# labels = watershed(pores_t, markers)
# %%
markers, num_features = ndi.label(pores_dtf>0, np.ones((3, 3, 3)))
num_features
# %%
import os
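# reshape_volume downsamples a 3D volume by an integer factor via block
# averaging; save_amira dumps the (optionally downsampled) volume as raw
# float32 plus a small Amira .hx script that loads it with the correct shape.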
def reshape_volume(volume, reshape):
res = np.zeros([s//reshape for s in volume.shape], dtype='float32')
xs,ys,zs = [s*reshape for s in res.shape]
for x,y,z in np.ndindex(reshape, reshape, reshape):
res += volume[x:xs:reshape, y:ys:reshape, z:zs:reshape]
return res/reshape**3
def save_amira(in_array, out_path, reshape=3):
data_path = str(out_path)
with open(os.path.join(data_path, 'amira.raw'), 'wb') as amira_file:
reshaped_vol = reshape_volume(in_array, reshape)
reshaped_vol.tofile(amira_file)
file_shape = reshaped_vol.shape
with open(os.path.join(data_path, 'tomo.hx'), 'w') as af:
af.write('# Amira Script\n')
af.write('remove -all\n')
af.write(r'[ load -raw ${SCRIPTDIR}/amira.raw little xfastest float 1 '+
str(file_shape[2])+' '+str(file_shape[1])+' '+str(file_shape[0])+
' 0 '+str(file_shape[2]-1)+' 0 '+str(file_shape[1]-1)+' 0 '+str(file_shape[0]-1)+
' ] setLabel tomo.raw\n')
# %%
save_amira(markers, out_dir, 1)
# %%
regions=regionprops(markers)
# %%
plt.figure(figsize=(15,15))
plt.imshow(pores_dtf[50])
# plt.colorbar(orientation='horizontal')
plt.contour(markers[50],colors='r')
plt.show()
# %%
plt.figure(figsize=(15,15))
plt.imshow(pores_t[50])
plt.contour(markers[50], colors='r')#, vmin = np.percentile(labels[200].flat, 77))
# plt.colorbar(orientation='horizontal')
plt.show()
plt.figure(figsize=(15,15))
plt.imshow(markers[50])
# plt.colorbar(orientation='horizontal')
plt.show()
plt.figure(figsize=(15,15))
plt.imshow(markers[:,200,:])
# plt.colorbar(orientation='horizontal')
plt.show()
plt.figure(figsize=(15,15))
plt.imshow(markers[:,:,200])
# plt.colorbar(orientation='horizontal')
plt.show()
# %%
vol = [r.area for r in regions if r.area<1e7]
# #volume of each pore
# vol = np.zeros((num_features+1), dtype=int)
# for x in tqdm_notebook(labels.flat):
# vol[x] += 1
# %%
xv, yv = np.histogram(vol[1:], bins=100)
plt.figure(figsize=(15,15))
plt.semilogy(yv[1:],xv,'o')
plt.grid()
plt.show()
# %%
# Radius of each pore
tt = local_maxi * pores_dtf  # TODO: fix - local_maxi is only defined in the commented-out watershed cell above
xr, yr = np.histogram(tt.flat, bins=100)
xr0, yr0 = np.histogram(np.power(vol,1./3), bins=1000)
# %%
plt.figure(figsize=(15,15))
plt.semilogy(yr[1:],xr[:],'o')
plt.semilogy(yr0[2:],xr0[1:],'o')
plt.xlim([0,20])
plt.grid()
plt.show()
# %%
# %%
| mit |
matthijsvk/multimodalSR | code/audioSR/old/fixDataset/TIMIT_utils_modified.py | 1 | 13527 | import os
import pickle
import sys
import librosa
import numpy as np
import scipy
import theano
TIMIT_original_dir = '/home/matthijs/TCDTIMIT/TIMIT/original'
TIMIT_fixed_dir = '/home/matthijs/TCDTIMIT/TIMIT/fixed'
def get_data(rootdir=TIMIT_fixed_dir):
inputs = []
targets = []
alphabet = {}
# count number of files for showing progress.
wavCounter = 0
for root, dirs, files in os.walk(rootdir):
for file in files:
if file.endswith('.wav'):
wavCounter += 1
print "There are ", wavCounter, " files to be processed"
from progress_bar import show_progress
processed = 0
for dir_path, sub_dirs, files in os.walk(rootdir):
for file in files:
if (os.path.join(dir_path, file)).endswith('.wav'):
## Get the data itself: inputs and targets
# --------------------------
wav_file_name = os.path.join(dir_path, file)
# from https://github.com/dtjchen/spoken-command-processor/blob/master/model/utils.py
sampling_rate, frames = scipy.io.wavfile.read(wav_file_name)
segment_duration_ms = 20
n_fft = int((segment_duration_ms / 1000.) * sampling_rate)
hop_duration_ms = 10
hop_length = int((hop_duration_ms / 1000.) * sampling_rate)
mfcc_count = 13
mfccs = librosa.feature.mfcc(
y=frames,
sr=sampling_rate,
n_mfcc=mfcc_count,
hop_length=hop_length,
n_fft=n_fft
)
mfcc_delta = librosa.feature.delta(mfccs)
mfcc_delta2 = librosa.feature.delta(mfccs, order=2)
# full_input = np.vstack([mfccs, mfcc_delta, mfcc_delta2])
full_input = np.concatenate((mfccs, mfcc_delta, mfcc_delta2), axis=1)
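                # note: concatenating along axis=1 appends the deltas along the
                # time axis (shape (13, 3*n_frames)); the commented-out np.vstack
                # variant would stack them as extra feature rows (39, n_frames)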
inputs.append(np.asarray(full_input, dtype=theano.config.floatX))
text_file_name = wav_file_name[:-4] + '.txt'
target_data_file = open(text_file_name)
target_data = str(target_data_file.read()).lower().translate(None, '!:,".;?')
target_data = target_data[8:-1] # No '.' in lexfree dictionary
targets.append(target_data)
## Get alphabet
# ------------------------
transcription_filename = os.path.join(dir_path, file)[:-4] + '.txt'
transcription_file = open(transcription_filename, 'r')
transcription = str(transcription_file.read()).lower().translate(None, '!:,".;?')
transcription = transcription[8:-1]
# count number of occurences of each character
for char in transcription:
if not char in alphabet:
alphabet.update({char: 1})
else:
alphabet[char] += 1
processed += 1
if (processed % 100 == 0):
show_progress(float(processed) / wavCounter)
print " | Read", processed, "files out of", wavCounter
print 'TIMIT Alphabet:\n', alphabet
alphabet_filename = 'TIMIT_Alphabet.pkl'
with open(alphabet_filename, 'wb') as f:
pickle.dump(alphabet, f, protocol=2)
return inputs, targets, alphabet
# convert the 61 phonemes from TIMIT to the reduced set of 39 phonemes -> preprocessing: substitute_phones.py
#################################################################
##### TODO read in phoneme data: see SpokenCommandProcessor/model/dataset.py and SCP/model/utils.py
###############################################################
def get_TIMIT_targets_one_hot(inputs, targets, alphabet):
list_of_alphabets = [key for key in alphabet]
list_of_alphabets.sort()
# print list_of_alphabets
num_targets = len(list_of_alphabets)
# print len(targets[0])
# targets_as_alphabet_indices = [[seq.index(char) for char in seq] for seq in targets]
one_hot_targets = [[np.zeros((num_targets)) for char in example] for example in targets]
# print len(one_hot_targets[0]), one_hot_targets[0]#, len(one_hot_targets[0][0][0])
for example_num in range(len(targets)):
for char_num in range(len(targets[example_num])):
# print targets[example_num][char_num]
# print list_of_alphabets.index(targets[example_num][char_num])
one_hot_targets[example_num][char_num][list_of_alphabets.index(targets[example_num][char_num])] = 1
return one_hot_targets
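# Worked example of the target encodings (illustrative values, assuming the
# sorted alphabet starts ['a', 'b', 'c', ...]): the string 'cab' becomes
# alphabet indices [2, 0, 1], and its one-hot form has one row of length
# len(alphabet) per character with a single 1 at that index.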
def get_TIMIT_targets_as_alphabet_indices(inputs, targets, alphabet):
list_of_alphabets = [key for key in alphabet]
list_of_alphabets.sort()
print('list of alphabets: {}'.format(list_of_alphabets))
print len(list_of_alphabets)
# print list_of_alphabets.index(22)
print targets[0]
targets_as_alphabet_indices = [[list_of_alphabets.index(char) for char in target] for target in targets]
print "Example target and alphabet indices: "
print 'target = {} \n alphabet indices = {}'.format(targets[0], targets_as_alphabet_indices[0])
return targets_as_alphabet_indices
def index2char_TIMIT(input_index_seq=None, TIMIT_pkl_file=os.path.join(os.getcwd(), 'TIMIT_data_prepared_for_CTC.pkl')):
with open(TIMIT_pkl_file, 'rb') as f:
data = pickle.load(f)
list_of_alphabets = data['chars']
blank_char = '_'
list_of_alphabets.append(blank_char)
output_character_seq = [list_of_alphabets[i] for i in input_index_seq]
output_sentence = ''.join(output_character_seq)
# for i in input_index_seq:
# output_character_seq.append(list_of_alphabets[i])
return output_sentence
def create_mask(TIMIT_pkl_file=os.path.join(os.getcwd(), 'TIMIT_data_prepared_for_CLM.pkl')):
with open(TIMIT_pkl_file, 'rb') as f:
data = pickle.load(f)
x = data['x']
max_seq_len = max([len(x[i]) for i in range(len(x))])
mask = np.zeros((len(x), max_seq_len))
for eg_num in range(len(x)):
mask[eg_num, 0:len(x[eg_num])] = 1
return mask
def prepare_TIMIT_for_CTC(dataset='train', savedir=os.getcwd(), test=0):
print 'Getting: Inputs, Targets, Alphabet...'
print "#########################"
rootdir = os.path.join(TIMIT_fixed_dir, dataset)
if (test):
### Read from pkl for faster testing
in_file_name = savedir + '/TIMIT_data_prepared_for_CTC.pkl'
with open(in_file_name, 'rb') as f:
reclaimed_data = pickle.load(f)
inputs = reclaimed_data['x']
targets = reclaimed_data['y_char']
targets_as_alphabet_indices = reclaimed_data['y_indices']
targets_one_hot = reclaimed_data['y_onehot']
alphabet = reclaimed_data['chars']
sample_input = inputs[0]
sample_target = targets[0]
# print sample_input
# print sample_target
else:
inputs, targets, alphabet = get_data(rootdir)
print "Generating coded targets..."
print "#########################"
targets_as_alphabet_indices = get_TIMIT_targets_as_alphabet_indices(inputs, targets, alphabet)
targets_one_hot = get_TIMIT_targets_one_hot(inputs, targets, alphabet)
list_of_alphabets = [key for key in alphabet]
list_of_alphabets.sort()
print "Alphabet list: ", list_of_alphabets
targets_as_alphabet_indices = [[list_of_alphabets.index(char) for char in target] for target in targets]
print "Example target and alphabet indices: "
print 'target = {} \nalphabet indices = {}'.format(targets[0], targets_as_alphabet_indices[0])
# prepare file structure to store data
n_batch = len(inputs)
max_input_length = max([len(inputs[i]) for i in range(len(inputs))])
input_dim = len(inputs[0][0])
X = np.zeros((n_batch, max_input_length, input_dim))
input_mask = np.zeros((n_batch, max_input_length)) # 1 if there's input data on this row
# read data, store in created structures
print "Storing data in X matrix..."
for example_id in range(len(inputs)):
curr_seq_len = len(inputs[example_id])
X[example_id, :curr_seq_len] = inputs[example_id]
input_mask[example_id, :curr_seq_len] = 1
print "Example of data read:"
sample_input = inputs[0]
sample_target = targets[0]
print "\t input: ", sample_input
print "\t target:", sample_target
"
## TODO: normalize the inputs using mean.
# From https://github.com/dtjchen/spoken-command-processor/blob/master/model/utils.py
from sklearn import preprocessing
def normalize_mean(X):
scaler = preprocessing.StandardScaler(with_mean=True, with_std=False).fit(X)
X = scaler.transform(X)
        return X, scaler.mean_
print "Normalizing input data using mean..."
X, mean = normalize_mean(X)
print "Mean of input data:", mean
print "After Normalization: example of data read:"
sample_input = inputs[0]
sample_target = targets[0]
print "\t input: ", sample_input
    print "\t target:", sample_target
if (not test):
out_file_name = savedir + '/TIMIT_data_prepared_for_CTC.pkl'
print "Dumping to pickle file", out_file_name
with open(out_file_name, 'wb') as f:
pickle.dump({
'x': X,
'inputs': inputs,
'mask': input_mask.astype(theano.config.floatX),
'y_indices': targets_as_alphabet_indices,
'y_char': targets,
'y_onehot': targets_one_hot,
'chars': list_of_alphabets
}, f, protocol=2)
print 'success!'
def prepare_TIMIT_for_CLM(dataset='train', savedir=os.getcwd(), test=0):
rootdir = os.path.join(TIMIT_fixed_dir, dataset)
if (test):
### Read from pkl for faster testing
in_file_name = savedir + '/TIMIT_data_prepared_for_CTC.pkl'
with open(in_file_name, 'rb') as f:
reclaimed_data = pickle.load(f)
inputs = reclaimed_data['x']
targets = reclaimed_data['y_char']
targets_as_alphabet_indices = reclaimed_data['y_indices']
targets_one_hot = reclaimed_data['y_onehot']
alphabet = reclaimed_data['chars']
sample_input = inputs[0]
sample_target = targets[0]
# print sample_input
# print sample_target
else:
inputs, targets, alphabet = get_data(rootdir)
t = get_TIMIT_targets_one_hot(inputs, targets, alphabet)
t1 = get_TIMIT_targets_as_alphabet_indices(inputs, targets, alphabet)
n_batch = len(t)
max_input_length = max(
[len(t[i]) for i in range(len(t))]) - 1 # As we predict from one less than the total sequence length
input_dim = len(t[0][0])
X = np.zeros((n_batch, max_input_length, input_dim))
Y = np.zeros((n_batch, max_input_length))
input_mask = np.zeros((n_batch, max_input_length))
for example_id in range(len(t)):
curr_seq_len = len(t[example_id][:-1])
X[example_id, :curr_seq_len] = t[example_id][:-1]
input_mask[example_id, :curr_seq_len] = 1
Y[example_id, :curr_seq_len] = t1[example_id][1:]
# inputs = X[:,:-1,:]
# outputs = Y[:,1:]
inputs1 = []
    outputs1 = []
for example_id in range(len(t)):
# # example_inputs = t[example_id][:-1]
# # example_outputs = t[example_id][1:]
# # inputs.append(example_inputs)
# # outputs.append(example_outputs)
example_inputs1 = t1[example_id][:-1]
example_outputs1 = t1[example_id][1:]
inputs1.append(example_inputs1)
outputs1.append(example_outputs1)
if (not test):
out_file_name = savedir + '/TIMIT_data_prepared_for_CLM.pkl'
with open(out_file_name, 'wb') as f:
# pickle.dump({'x':inputs, 'x_indices':inputs1, 'y': outputs, 'y_indices':outputs1}, f, protocol=3)
# pickle.dump({'x':inputs.astype(theano.config.floatX), 'mask':input_mask.astype(theano.config.floatX), 'x_indices':inputs1, 'y': outputs, 'y_indices':outputs1}, f, protocol=3)
pickle.dump({
'x': X.astype(theano.config.floatX), 'mask': input_mask.astype(theano.config.floatX),
'y': Y.astype(np.int32), 'x_list': inputs1, 'y_list': outputs1
}, f, protocol=2)
# inputs = [ [ [ t[example][char] ] for char in range(0, len(t[example])-1)] for example in range(len(t))]
# outputs = [ [ [ t[example][char] ] for char in range(1, len(t[example]))] for example in range(len(t))]
# return inputs, outputs#, inputs1, outputs1
if __name__ == '__main__':
if len(sys.argv) > 1:
dataset = str(sys.argv[1])
else:
dataset = ''
savedir = os.getcwd()
# pdb.set_trace()
from fixWavs import *
fixWavs(TIMIT_original_dir, TIMIT_fixed_dir)
# now we still need to copy the other files (txt, phn, wrd) to the fixed dir.
prepare_TIMIT_for_CTC(dataset, savedir, test=0)
print("\n\n##############################")
print("#### Preparing for CLM... ###")
print("##############################")
prepare_TIMIT_for_CLM(dataset, savedir, test=1)
| mit |
ContinuumIO/dask | dask/bag/core.py | 1 | 77970 | import io
import itertools
import math
import operator
import uuid
import warnings
from collections import defaultdict
from collections.abc import Iterable, Iterator
from functools import wraps, partial, reduce
from random import Random
from urllib.request import urlopen
import tlz as toolz
from tlz import (
merge,
take,
valmap,
partition_all,
remove,
compose,
curry,
first,
second,
accumulate,
peek,
frequencies,
merge_with,
join,
reduceby,
count,
pluck,
groupby,
topk,
unique,
)
from .. import config
from .avro import to_avro
from ..base import tokenize, dont_optimize, DaskMethodsMixin
from ..bytes import open_files
from ..context import globalmethod
from ..core import quote, istask, get_dependencies, reverse_dict, flatten
from ..delayed import Delayed, unpack_collections
from ..highlevelgraph import HighLevelGraph
from ..multiprocessing import get as mpget
from ..optimization import fuse, cull, inline
from ..utils import (
apply,
system_encoding,
takes_multiple_arguments,
funcname,
digit,
insert,
ensure_dict,
ensure_bytes,
ensure_unicode,
key_split,
)
from . import chunk
no_default = "__no__default__"
no_result = type(
"no_result", (object,), {"__slots__": (), "__reduce__": lambda self: "no_result"}
)
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list`` and ``reify``.
This traverses tasks and small lists. We choose not to traverse down lists
of size >= 50 because it is unlikely that sequences this long contain other
sequences in practice.
Examples
--------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if type(task) is list and len(task) < 50:
return [lazify_task(arg, False) for arg in task]
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks.
See Also
--------
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk, keys, dependencies=None):
""" Inline lists that are only used once.
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards.
"""
if dependencies is None:
dependencies = {k: get_dependencies(dsk, task=v) for k, v in dsk.items()}
dependents = reverse_dict(dependencies)
inline_keys = {
k
for k, v in dsk.items()
if istask(v) and v and v[0] is list and len(dependents[k]) == 1
}
inline_keys.difference_update(flatten(keys))
dsk = inline(dsk, inline_keys, inline_constants=False)
for k in inline_keys:
del dsk[k]
return dsk
def optimize(dsk, keys, fuse_keys=None, rename_fused_keys=None, **kwargs):
""" Optimize a dask from a dask Bag. """
dsk = ensure_dict(dsk)
dsk2, dependencies = cull(dsk, keys)
kwargs = {}
if rename_fused_keys is not None:
kwargs["rename_keys"] = rename_fused_keys
dsk3, dependencies = fuse(dsk2, keys + (fuse_keys or []), dependencies, **kwargs)
dsk4 = inline_singleton_lists(dsk3, keys, dependencies)
dsk5 = lazify(dsk4)
return dsk5
def _to_textfiles_chunk(data, lazy_file, last_endline):
with lazy_file as f:
if isinstance(f, io.TextIOWrapper):
endline = u"\n"
ensure = ensure_unicode
else:
endline = b"\n"
ensure = ensure_bytes
started = False
for d in data:
if started:
f.write(endline)
else:
started = True
f.write(ensure(d))
if last_endline:
f.write(endline)
def to_textfiles(
b,
path,
name_function=None,
compression="infer",
encoding=system_encoding,
compute=True,
storage_options=None,
last_endline=False,
**kwargs
):
""" Write dask Bag to disk, one filename per partition, one line per element.
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
The * will be replaced by the increasing sequence 1, 2, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
**Bag Contents**: The bag calling ``to_textfiles`` must be a bag of
text strings. For example, a bag of dictionaries could be written to
JSON text files by mapping ``json.dumps`` on to the bag first, and
then calling ``to_textfiles`` :
>>> b_dict.map(json.dumps).to_textfiles("/path/to/data/*.json") # doctest: +SKIP
**Last endline**: By default the last line does not end with a newline
character. Pass ``last_endline=True`` to invert the default.
"""
mode = "wb" if encoding is None else "wt"
files = open_files(
path,
compression=compression,
mode=mode,
encoding=encoding,
name_function=name_function,
num=b.npartitions,
**(storage_options or {})
)
name = "to-textfiles-" + uuid.uuid4().hex
dsk = {
(name, i): (_to_textfiles_chunk, (b.name, i), f, last_endline)
for i, f in enumerate(files)
}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])
out = type(b)(graph, name, b.npartitions)
if compute:
out.compute(**kwargs)
return [f.path for f in files]
else:
return out.to_delayed()
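# finalize concatenates the per-partition results of a Bag into a single list
# when computing; finalize_item unwraps the single result of an Item.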
def finalize(results):
if not results:
return results
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
def finalize_item(results):
return results[0]
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(operator.methodcaller(key, *args, **kwargs))
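    # Unknown attributes fall back to the corresponding ``str`` method,
    # mapped element-wise over the underlying bag.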
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc. """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
class Item(DaskMethodsMixin):
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
self.name = key
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [self.key]
def __dask_tokenize__(self):
return self.key
__dask_optimize__ = globalmethod(optimize, key="bag_optimize", falsey=dont_optimize)
__dask_scheduler__ = staticmethod(mpget)
def __dask_postcompute__(self):
return finalize_item, ()
def __dask_postpersist__(self):
return Item, (self.key,)
@staticmethod
def from_delayed(value):
""" Create bag item from a dask.delayed value.
See ``dask.bag.from_delayed`` for details
"""
from dask.delayed import Delayed, delayed
if not isinstance(value, Delayed) and hasattr(value, "key"):
value = delayed(value)
assert isinstance(value, Delayed)
return Item(ensure_dict(value.dask), value.key)
@property
def _args(self):
return (self.dask, self.key)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self.key = state
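    # Lazily apply ``func`` to the computed value of this Item, returning a
    # new Item backed by an extended graph.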
def apply(self, func):
name = "{0}-{1}".format(funcname(func), tokenize(self, func, "apply"))
dsk = {name: (func, self.key)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return Item(graph, name)
__int__ = __float__ = __complex__ = __bool__ = DaskMethodsMixin.compute
def to_delayed(self, optimize_graph=True):
"""Convert into a ``dask.delayed`` object.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
"""
from dask.delayed import Delayed
dsk = self.__dask_graph__()
if optimize_graph:
dsk = self.__dask_optimize__(dsk, self.__dask_keys__())
return Delayed(self.key, dsk)
class Bag(DaskMethodsMixin):
""" Parallel collection of Python objects
Examples
--------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
def __init__(self, dsk, name, npartitions):
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self.name = name
self.npartitions = npartitions
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [(self.name, i) for i in range(self.npartitions)]
def __dask_layers__(self):
return (self.name,)
def __dask_tokenize__(self):
return self.name
__dask_optimize__ = globalmethod(optimize, key="bag_optimize", falsey=dont_optimize)
__dask_scheduler__ = staticmethod(mpget)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self.name, self.npartitions)
def __str__(self):
return "dask.bag<%s, npartitions=%d>" % (key_split(self.name), self.npartitions)
__repr__ = __str__
str = property(fget=StringAccessor)
def map(self, func, *args, **kwargs):
"""Apply a function elementwise across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
*args, **kwargs : Bag, Item, or object
Extra arguments and keyword arguments to pass to ``func`` *after*
the calling bag instance. Non-Bag args/kwargs are broadcasted
across all calls to ``func``.
Notes
-----
For calls with multiple `Bag` arguments, corresponding partitions
should have the same length; if they do not, the call will error at
compute time.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(5), npartitions=2)
>>> b2 = db.from_sequence(range(5, 10), npartitions=2)
Apply a function to all elements in a bag:
>>> b.map(lambda x: x + 1).compute()
[1, 2, 3, 4, 5]
Apply a function with arguments from multiple bags:
>>> from operator import add
>>> b.map(add, b2).compute()
[5, 7, 9, 11, 13]
Non-bag arguments are broadcast across all calls to the mapped
function:
>>> b.map(add, 1).compute()
[1, 2, 3, 4, 5]
Keyword arguments are also supported, and have the same semantics as
regular arguments:
>>> def myadd(x, y=0):
... return x + y
>>> b.map(myadd, y=b2).compute()
[5, 7, 9, 11, 13]
>>> b.map(myadd, y=1).compute()
[1, 2, 3, 4, 5]
Both arguments and keyword arguments can also be instances of
``dask.bag.Item``. Here we'll add the max value in the bag to each
element:
>>> b.map(myadd, b.max()).compute()
[4, 5, 6, 7, 8]
"""
return bag_map(func, self, *args, **kwargs)
def starmap(self, func, **kwargs):
"""Apply a function using argument tuples from the given bag.
This is similar to ``itertools.starmap``, except it also accepts
        keyword arguments. In pseudocode, this could be written as:
>>> def starmap(func, bag, **kwargs):
... return (func(*args, **kwargs) for args in bag)
Parameters
----------
func : callable
**kwargs : Item, Delayed, or object, optional
Extra keyword arguments to pass to ``func``. These can either be
normal objects, ``dask.bag.Item``, or ``dask.delayed.Delayed``.
Examples
--------
>>> import dask.bag as db
>>> data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]
>>> b = db.from_sequence(data, npartitions=2)
Apply a function to each argument tuple:
>>> from operator import add
>>> b.starmap(add).compute()
[3, 7, 11, 15, 19]
Apply a function to each argument tuple, with additional keyword
arguments:
>>> def myadd(x, y, z=0):
... return x + y + z
>>> b.starmap(myadd, z=10).compute()
[13, 17, 21, 25, 29]
Keyword arguments can also be instances of ``dask.bag.Item`` or
``dask.delayed.Delayed``:
>>> max_second = b.pluck(1).max()
>>> max_second.compute()
10
>>> b.starmap(myadd, z=max_second).compute()
[13, 17, 21, 25, 29]
"""
name = "{0}-{1}".format(
funcname(func), tokenize(self, func, "starmap", **kwargs)
)
dependencies = [self]
if kwargs:
kwargs, collections = unpack_scalar_dask_kwargs(kwargs)
dependencies.extend(collections)
dsk = {
(name, i): (reify, (starmap_chunk, func, (self.name, i), kwargs))
for i in range(self.npartitions)
}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
return type(self)(graph, name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self.name, self.npartitions = state
def filter(self, predicate):
""" Filter elements in collection by a predicate function.
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = "filter-{0}-{1}".format(funcname(predicate), tokenize(self, predicate))
dsk = dict(
((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions)
)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def random_sample(self, prob, random_state=None):
""" Return elements from bag with probability of ``prob``.
Parameters
----------
prob : float
A float between 0 and 1, representing the probability that each
element will be returned.
random_state : int or random.Random, optional
If an integer, will be used to seed a new ``random.Random`` object.
If provided, results in deterministic sampling.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.random_sample(0.5, 43))
[0, 3, 4]
>>> list(b.random_sample(0.5, 43))
[0, 3, 4]
"""
if not 0 <= prob <= 1:
raise ValueError("prob must be a number in the interval [0, 1]")
if not isinstance(random_state, Random):
random_state = Random(random_state)
name = "random-sample-%s" % tokenize(self, prob, random_state.getstate())
state_data = random_state_data_python(self.npartitions, random_state)
dsk = {
(name, i): (reify, (random_sample, (self.name, i), state, prob))
for i, state in zip(range(self.npartitions), state_data)
}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate.
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = "remove-{0}-{1}".format(funcname(predicate), tokenize(self, predicate))
dsk = dict(
((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions)
)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def map_partitions(self, func, *args, **kwargs):
"""Apply a function to every partition across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
The function to be called on every partition.
This function should expect an ``Iterator`` or ``Iterable`` for
every partition and should return an ``Iterator`` or ``Iterable``
in return.
*args, **kwargs : Bag, Item, Delayed, or object
Arguments and keyword arguments to pass to ``func``.
Partitions from this bag will be the first argument, and these will
be passed *after*.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(nums, den=1):
... return [num / den for num in nums]
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map_partitions(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map_partitions(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
return map_partitions(func, self, *args, **kwargs)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection.
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = "pluck-" + tokenize(self, key, default)
key = quote(key)
if default == no_default:
dsk = dict(
((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions)
)
else:
dsk = dict(
((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions)
)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def unzip(self, n):
"""Transform a bag of tuples to ``n`` bags of their elements.
Examples
--------
>>> b = from_sequence([(i, i + 1, i + 2) for i in range(10)])
>>> first, second, third = b.unzip(3)
>>> isinstance(first, Bag)
True
>>> first.compute()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Note that this is equivalent to:
>>> first, second, third = (b.pluck(i) for i in range(3))
"""
return tuple(self.pluck(i) for i in range(n))
@wraps(to_textfiles)
def to_textfiles(
self,
path,
name_function=None,
compression="infer",
encoding=system_encoding,
compute=True,
storage_options=None,
last_endline=False,
**kwargs
):
return to_textfiles(
self,
path,
name_function,
compression,
encoding,
compute,
storage_options=storage_options,
last_endline=last_endline,
**kwargs
)
@wraps(to_avro)
def to_avro(
self,
filename,
schema,
name_function=None,
storage_options=None,
codec="null",
sync_interval=16000,
metadata=None,
compute=True,
**kwargs
):
return to_avro(
self,
filename,
schema,
name_function,
storage_options,
codec,
sync_interval,
metadata,
compute,
**kwargs
)
def fold(
self, binop, combine=None, initial=no_default, split_every=None, out_type=Item
):
""" Parallelizable reduction
Fold is like the builtin function ``reduce`` except that it works in
parallel. Fold takes two binary operator functions, one to reduce each
partition of our dataset and another to combine results between
partitions
1. ``binop``: Binary operator to reduce within each partition
2. ``combine``: Binary operator to combine results from binop
Sequentially this would look like the following:
>>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP
>>> final = reduce(combine, intermediates) # doctest: +SKIP
If only one function is given then it is used for both functions
``binop`` and ``combine`` as in the following example to compute the
sum:
>>> def add(x, y):
... return x + y
>>> b = from_sequence(range(5))
>>> b.fold(add).compute() # doctest: +SKIP
10
In full form we provide both binary operators as well as their default
arguments
>>> b.fold(binop=add, combine=add, initial=0).compute() # doctest: +SKIP
10
More complex binary operators are also doable
>>> def add_to_set(acc, x):
... ''' Add new element x to set acc '''
... return acc | set([x])
>>> b.fold(add_to_set, set.union, initial=set()).compute() # doctest: +SKIP
{1, 2, 3, 4, 5}
See Also
--------
Bag.foldby
"""
combine = combine or binop
if initial is not no_default:
return self.reduction(
curry(_reduce, binop, initial=initial),
curry(_reduce, combine),
split_every=split_every,
out_type=out_type,
)
else:
from tlz.curried import reduce
return self.reduction(
reduce(binop),
reduce(combine),
split_every=split_every,
out_type=out_type,
)
def frequencies(self, split_every=None, sort=False):
""" Count number of occurrences of each distinct element.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
        {'Alice': 2, 'Bob': 1}
"""
result = self.reduction(
frequencies,
merge_frequencies,
out_type=Bag,
split_every=split_every,
name="frequencies",
).map_partitions(dictitems)
if sort:
result = result.map_partitions(sorted, key=second, reverse=True)
return result
def topk(self, k, key=None, split_every=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, k, key=key)
else:
func = partial(topk, k)
return self.reduction(
func,
compose(func, toolz.concat),
out_type=Bag,
split_every=split_every,
name="topk",
)
def distinct(self, key=None):
""" Distinct elements of collection
Unordered without repeats.
Parameters
----------
key: {callable,str}
Defines uniqueness of items in bag by calling ``key`` on each item.
If a string is passed ``key`` is considered to be ``lambda x: x[key]``.
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
>>> b = from_sequence([{'name': 'Alice'}, {'name': 'Bob'}, {'name': 'Alice'}])
>>> b.distinct(key=lambda x: x['name']).compute()
[{'name': 'Alice'}, {'name': 'Bob'}]
>>> b.distinct(key='name').compute()
[{'name': 'Alice'}, {'name': 'Bob'}]
"""
func = chunk_distinct if key is None else partial(chunk_distinct, key=key)
agg = merge_distinct if key is None else partial(merge_distinct, key=key)
return self.reduction(func, agg, out_type=Bag, name="distinct")
def reduction(
self, perpartition, aggregate, split_every=None, out_type=Item, name=None
):
""" Reduce collection with reduction operators.
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
split_every: int (optional)
Group partitions into groups of this size while performing reduction
Defaults to 8
out_type: {Bag, Item}
The out type of the result, Item if a single element, Bag if a list
of elements. Defaults to Item.
Examples
--------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
if split_every is None:
split_every = 8
if split_every is False:
split_every = self.npartitions
token = tokenize(self, perpartition, aggregate, split_every)
a = "%s-part-%s" % (name or funcname(perpartition), token)
is_last = self.npartitions == 1
dsk = {
(a, i): (empty_safe_apply, perpartition, (self.name, i), is_last)
for i in range(self.npartitions)
}
k = self.npartitions
b = a
fmt = "%s-aggregate-%s" % (name or funcname(aggregate), token)
depth = 0
while k > split_every:
c = fmt + str(depth)
for i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(c, i)] = (
empty_safe_aggregate,
aggregate,
[(b, j) for j in inds],
False,
)
k = i + 1
b = c
depth += 1
dsk[(fmt, 0)] = (
empty_safe_aggregate,
aggregate,
[(b, j) for j in range(k)],
True,
)
graph = HighLevelGraph.from_collections(fmt, dsk, dependencies=[self])
if out_type is Item:
dsk[fmt] = dsk.pop((fmt, 0))
return Item(graph, fmt)
else:
return Bag(graph, fmt, 1)
def sum(self, split_every=None):
""" Sum all elements """
return self.reduction(sum, sum, split_every=split_every)
def max(self, split_every=None):
""" Maximum element """
return self.reduction(max, max, split_every=split_every)
def min(self, split_every=None):
""" Minimum element """
return self.reduction(min, min, split_every=split_every)
def any(self, split_every=None):
""" Are any of the elements truthy? """
return self.reduction(any, any, split_every=split_every)
def all(self, split_every=None):
""" Are all elements truthy? """
return self.reduction(all, all, split_every=split_every)
def count(self, split_every=None):
""" Count the number of elements. """
return self.reduction(count, sum, split_every=split_every)
def mean(self):
""" Arithmetic mean """
def mean_chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def mean_aggregate(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(mean_chunk, mean_aggregate, split_every=False)
def var(self, ddof=0):
""" Variance """
return self.reduction(
chunk.var_chunk, partial(chunk.var_aggregate, ddof=ddof), split_every=False
)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Joins collection with another collection.
Other collection must be one of the following:
1. An iterable. We recommend tuples over lists for internal
performance reasons.
2. A delayed object, pointing to a tuple. This is recommended if the
other collection is sizable and you're using the distributed
scheduler. Dask is able to pass around data wrapped in delayed
objects with greater sophistication.
3. A Bag with a single partition
You might also consider Dask Dataframe, whose join operations are much
more heavily optimized.
Parameters
----------
other: Iterable, Delayed, Bag
Other collection on which to join
on_self: callable
Function to call on elements in this collection to determine a
match
on_other: callable (defaults to on_self)
Function to call on elements in the other collection to determine a
match
Examples
--------
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
name = "join-" + tokenize(self, other, on_self, on_other)
dsk = {}
if isinstance(other, Bag):
if other.npartitions == 1:
dsk.update(other.dask)
other = other.__dask_keys__()[0]
dsk["join-%s-other" % name] = (list, other)
else:
msg = (
"Multi-bag joins are not implemented. "
"We recommend Dask dataframe if appropriate"
)
raise NotImplementedError(msg)
elif isinstance(other, Delayed):
dsk.update(other.dask)
other = other._key
elif isinstance(other, Iterable):
other = other
else:
msg = (
"Joined argument must be single-partition Bag, "
" delayed object, or Iterable, got %s" % type(other).__name
)
raise TypeError(msg)
if on_other is None:
on_other = on_self
for i in range(self.npartitions):
dsk[(name, i)] = (list, (join, on_other, other, on_self, (self.name, i)))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags. """
assert isinstance(other, Bag)
name = "product-" + tokenize(self, other)
n, m = self.npartitions, other.npartitions
dsk = dict(
(
(name, i * m + j),
(list, (itertools.product, (self.name, i), (other.name, j))),
)
for i in range(n)
for j in range(m)
)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, other])
return type(self)(graph, name, n * m)
def foldby(
self,
key,
binop,
initial=no_default,
combine=None,
combine_initial=no_default,
split_every=None,
):
""" Combined reduction and groupby.
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> b.groupby(key).map(lambda kv: (kv[0], reduction(kv[1]))) # doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
**Key Function**
The key function determines how to group the elements in your bag.
In the common case where your bag holds dictionaries, the key
function typically pulls out one of the fields:
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
**Binops**
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
``binop`` and ``combine``. Each binary operator's two inputs and its
output must all have the same type.
Binop takes a running total and a new element and produces a new total:
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them:
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
**split_every**
Group partitions into groups of this size while performing reduction.
Defaults to 8.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
Examples
--------
We can compute the maximum of some ``(key, value)`` pairs, grouped
by the ``key``. (You might be better off converting the ``Bag`` to
a ``dask.dataframe`` and using its groupby).
>>> import random
>>> import dask.bag as db
>>> tokens = list('abcdefg')
>>> values = range(10000)
>>> a = [(random.choice(tokens), random.choice(values))
... for _ in range(100)]
>>> a[:2] # doctest: +SKIP
[('g', 676), ('a', 871)]
>>> a = db.from_sequence(a)
>>> def binop(t, x):
... return max((t, x), key=lambda x: x[1])
>>> a.foldby(lambda x: x[0], binop).compute() # doctest: +SKIP
[('g', ('g', 984)),
('a', ('a', 871)),
('b', ('b', 999)),
('c', ('c', 765)),
('f', ('f', 955)),
('e', ('e', 991)),
('d', ('d', 854))]
See Also
--------
toolz.reduceby
pyspark.combineByKey
"""
if split_every is None:
split_every = 8
if split_every is False:
split_every = self.npartitions
token = tokenize(self, key, binop, initial, combine, combine_initial)
a = "foldby-a-" + token
if combine is None:
combine = binop
if initial is not no_default:
dsk = {
(a, i): (reduceby, key, binop, (self.name, i), initial)
for i in range(self.npartitions)
}
else:
dsk = {
(a, i): (reduceby, key, binop, (self.name, i))
for i in range(self.npartitions)
}
combine2 = partial(chunk.foldby_combine2, combine)
depth = 0
k = self.npartitions
b = a
while k > split_every:
c = b + str(depth)
if combine_initial is not no_default:
for i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(c, i)] = (
reduceby,
0,
combine2,
(toolz.concat, (map, dictitems, [(b, j) for j in inds])),
combine_initial,
)
else:
for i, inds in enumerate(partition_all(split_every, range(k))):
dsk[(c, i)] = (
merge_with,
(partial, reduce, combine),
[(b, j) for j in inds],
)
k = i + 1
b = c
depth += 1
e = "foldby-b-" + token
if combine_initial is not no_default:
dsk[(e, 0)] = (
dictitems,
(
reduceby,
0,
combine2,
(toolz.concat, (map, dictitems, [(b, j) for j in range(k)])),
combine_initial,
),
)
else:
dsk[(e, 0)] = (
dictitems,
(merge_with, (partial, reduce, combine), [(b, j) for j in range(k)]),
)
graph = HighLevelGraph.from_collections(e, dsk, dependencies=[self])
return type(self)(graph, e, 1)
def take(self, k, npartitions=1, compute=True, warn=True):
""" Take the first k elements.
Parameters
----------
k : int
The number of elements to return
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``k`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
warn : bool, optional
Whether to warn if the number of elements returned is less than
requested, default is True.
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
raise ValueError(
"only {} partitions, take "
"received {}".format(self.npartitions, npartitions)
)
token = tokenize(self, k, npartitions)
name = "take-" + token
if npartitions > 1:
name_p = "take-partial-" + token
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (list, (take, k, (self.name, i)))
concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)]))
dsk[(name, 0)] = (safe_take, k, concat, warn)
else:
dsk = {(name, 0): (safe_take, k, (self.name, 0), warn)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
b = Bag(graph, name, 1)
if compute:
return tuple(b.compute())
else:
return b
def flatten(self):
""" Concatenate nested lists into one long list.
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.flatten())
[1, 2, 3]
"""
name = "flatten-" + tokenize(self)
dsk = dict(
((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions)
)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(
self,
grouper,
method=None,
npartitions=None,
blocksize=2 ** 20,
max_branch=None,
shuffle=None,
):
""" Group collection by key function
This requires a full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
Parameters
----------
grouper: function
Function on which to group elements
shuffle: str
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
npartitions: int
If using the disk-based shuffle, the number of output partitions
blocksize: int
If using the disk-based shuffle, the size of shuffle blocks (bytes)
max_branch: int
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
Examples
--------
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> dict(b.groupby(iseven)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if method is not None:
raise Exception("The method= keyword has been moved to shuffle=")
if shuffle is None:
shuffle = config.get("shuffle", None)
if shuffle is None:
if "distributed" in config.get("scheduler", ""):
shuffle = "tasks"
else:
shuffle = "disk"
if shuffle == "disk":
return groupby_disk(
self, grouper, npartitions=npartitions, blocksize=blocksize
)
elif shuffle == "tasks":
return groupby_tasks(self, grouper, max_branch=max_branch)
else:
msg = "Shuffle must be 'disk' or 'tasks'"
raise NotImplementedError(msg)
def to_dataframe(self, meta=None, columns=None):
""" Create Dask Dataframe from a Dask Bag.
Bag should contain tuples, dict records, or scalars.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Parameters
----------
meta : pd.DataFrame, dict, iterable, optional
An empty ``pd.DataFrame`` that matches the dtypes and column names
of the output. This metadata is necessary for many algorithms in
dask dataframe to work. For ease of use, some alternative inputs
are also available. Instead of a ``DataFrame``, a ``dict`` of
``{name: dtype}`` or iterable of ``(name, dtype)`` can be provided.
If not provided or a list, a single element from the first
partition will be computed, triggering a potentially expensive call
to ``compute``. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
columns : sequence, optional
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the columns.
Otherwise this argument indicates the order of the columns in the
result (any names not found in the data will become all-NA
columns). Note that if ``meta`` is provided, column names will be
taken from there and this parameter is invalid.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
      name  balance
0    Alice      100
1      Bob      200
0  Charlie      300
"""
import pandas as pd
import dask.dataframe as dd
if meta is None:
head = self.take(1, warn=False)
if len(head) == 0:
raise ValueError(
"`dask.bag.Bag.to_dataframe` failed to "
"properly infer metadata, please pass in "
"metadata via the `meta` keyword"
)
meta = pd.DataFrame(list(head), columns=columns)
elif columns is not None:
raise ValueError("Can't specify both `meta` and `columns`")
else:
meta = dd.utils.make_meta(meta)
# Serializing the columns and dtypes is much smaller than serializing
# the empty frame
cols = list(meta.columns)
dtypes = meta.dtypes.to_dict()
name = "to_dataframe-" + tokenize(self, cols, dtypes)
dsk = self.__dask_optimize__(self.dask, self.__dask_keys__())
for i in range(self.npartitions):
dsk[(name, i)] = (to_dataframe, (self.name, i), cols, dtypes)
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(dsk, name, meta, divisions)
def to_delayed(self, optimize_graph=True):
"""Convert into a list of ``dask.delayed`` objects, one per partition.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
See Also
--------
dask.bag.from_delayed
"""
from dask.delayed import Delayed
keys = self.__dask_keys__()
dsk = self.__dask_graph__()
if optimize_graph:
dsk = self.__dask_optimize__(dsk, keys)
return [Delayed(k, dsk) for k in keys]
def repartition(self, npartitions):
""" Changes the number of partitions of the bag.
This can be used to reduce or increase the number of partitions
of the bag.
Examples
--------
>>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP
"""
new_name = "repartition-%d-%s" % (npartitions, tokenize(self, npartitions))
if npartitions == self.npartitions:
return self
elif npartitions < self.npartitions:
ratio = self.npartitions / npartitions
new_partitions_boundaries = [
int(old_partition_index * ratio)
for old_partition_index in range(npartitions + 1)
]
dsk = {}
for new_partition_index in range(npartitions):
value = (
list,
(
toolz.concat,
[
(self.name, old_partition_index)
for old_partition_index in range(
new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1],
)
],
),
)
dsk[new_name, new_partition_index] = value
else: # npartitions > self.npartitions
ratio = npartitions / self.npartitions
split_name = "split-%s" % tokenize(self, npartitions)
dsk = {}
last = 0
j = 0
for i in range(self.npartitions):
new = last + ratio
if i == self.npartitions - 1:
k = npartitions - j
else:
k = int(new - last)
dsk[(split_name, i)] = (split, (self.name, i), k)
for jj in range(k):
dsk[(new_name, j)] = (operator.getitem, (split_name, i), jj)
j += 1
last = new
graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[self])
return Bag(graph, name=new_name, npartitions=npartitions)
def accumulate(self, binop, initial=no_default):
""" Repeatedly apply binary function to a sequence, accumulating results.
This assumes that the bag is ordered. While this is typically the case,
not all Dask.bag functions preserve this property.
Examples
--------
>>> from operator import add
>>> b = from_sequence([1, 2, 3, 4, 5], npartitions=2)
>>> b.accumulate(add).compute() # doctest: +SKIP
[1, 3, 6, 10, 15]
Accumulate also takes an optional argument that will be used as the
first value.
>>> b.accumulate(add, initial=-1) # doctest: +SKIP
[-1, 0, 2, 5, 9, 14]
"""
token = tokenize(self, binop, initial)
binop_name = funcname(binop)
a = "%s-part-%s" % (binop_name, token)
b = "%s-first-%s" % (binop_name, token)
c = "%s-second-%s" % (binop_name, token)
dsk = {
(a, 0): (accumulate_part, binop, (self.name, 0), initial, True),
(b, 0): (first, (a, 0)),
(c, 0): (second, (a, 0)),
}
for i in range(1, self.npartitions):
dsk[(a, i)] = (accumulate_part, binop, (self.name, i), (c, i - 1))
dsk[(b, i)] = (first, (a, i))
dsk[(c, i)] = (second, (a, i))
graph = HighLevelGraph.from_collections(b, dsk, dependencies=[self])
return Bag(graph, b, self.npartitions)
def accumulate_part(binop, seq, initial, is_first=False):
if initial == no_default:
res = list(accumulate(binop, seq))
else:
res = list(accumulate(binop, seq, initial=initial))
if is_first:
return res, res[-1] if res else [], initial
return res[1:], res[-1]
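# Hedged illustration (a hypothetical helper, not part of dask): how the
# pieces returned by ``accumulate_part`` chain across partitions -- each
# partition is accumulated locally and the previous partition's running
# total seeds the next call. Plain lists stand in for bag partitions, and
# the module's own ``no_default`` sentinel is reused.
def _example_chained_accumulate(partitions, binop):
    out = []
    carry = no_default
    for i, part in enumerate(partitions):
        pieces = accumulate_part(binop, part, carry, is_first=(i == 0))
        # first element: accumulated values, second element: running total
        out.extend(pieces[0])
        carry = pieces[1]
    return out
# e.g. _example_chained_accumulate([[1, 2, 3], [4, 5]], operator.add)
# gives [1, 3, 6, 10, 15], matching Bag.accumulate above.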
def partition(grouper, sequence, npartitions, p, nelements=2 ** 20):
""" Partition a bag along a grouper, store partitions on disk. """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2, fsync=True)
return p
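# Hypothetical sketch (not used by dask itself) of the routing rule in
# ``partition`` above: grouped keys are hashed into one of ``npartitions``
# buckets with ``abs(hash(k)) % npartitions``. A plain dict stands in for
# the on-disk partd store, and the module's ``groupby``/``defaultdict``
# imports are reused.
def _example_hash_route(grouper, sequence, npartitions):
    routed = defaultdict(list)
    for k, v in groupby(grouper, sequence).items():
        routed[abs(hash(k)) % npartitions].extend(v)
    return dict(routed)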
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs. """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create a dask Bag from Python sequence.
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both.)
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See Also
--------
read_text: Create bag from text files
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = "from_sequence-" + tokenize(seq, partition_size)
if len(parts) > 0:
d = dict(((name, i), list(part)) for i, part in enumerate(parts))
else:
d = {(name, 0): []}
return Bag(d, name, len(d))
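# Rough sketch of the sizing rule used by ``from_sequence`` above (an
# observation about the code, not a public API): with only ``npartitions``
# given, the partition size is the ceiling of len(seq) / npartitions, and
# with neither given it aims for roughly 100 partitions.
def _example_default_partition_size(n_elements, npartitions=None):
    if npartitions:
        return int(math.ceil(n_elements / npartitions))
    return 1 if n_elements < 100 else int(n_elements / 100)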
def from_url(urls):
"""Create a dask Bag from a url.
Examples
--------
>>> a = from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>>> a.take(8) # doctest: +SKIP
(b'Dask\\n',
b'====\\n',
b'\\n',
b'|Build Status| |Coverage| |Doc Status| |Gitter| |Version Status|\\n',
b'\\n',
b'Dask is a flexible parallel computing library for analytics. See\\n',
b'documentation_ for more information.\\n',
b'\\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = "from_url-" + uuid.uuid4().hex
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements.
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = "concat-" + tokenize(*bags)
counter = itertools.count(0)
dsk = {(name, next(counter)): key for bag in bags for key in bag.__dask_keys__()}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)
return Bag(graph, name, len(dsk))
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if len(seq) and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
def from_delayed(values):
""" Create bag from many dask Delayed objects.
These objects will become the partitions of the resulting Bag. They should
evaluate to a ``list`` or some other concrete sequence.
Parameters
----------
values: list of delayed values
An iterable of dask Delayed objects. Each evaluating to a list.
Returns
-------
Bag
Examples
--------
>>> x, y, z = [delayed(load_sequence_from_file)(fn)
... for fn in filenames] # doctest: +SKIP
>>> b = from_delayed([x, y, z]) # doctest: +SKIP
See also
--------
dask.delayed
"""
from dask.delayed import Delayed, delayed
if isinstance(values, Delayed):
values = [values]
values = [
delayed(v) if not isinstance(v, Delayed) and hasattr(v, "key") else v
for v in values
]
name = "bag-from-delayed-" + tokenize(*values)
names = [(name, i) for i in range(len(values))]
values2 = [(reify, v.key) for v in values]
dsk = dict(zip(names, values2))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=values)
return Bag(graph, name, len(values))
def chunk_distinct(seq, key=None):
if key is not None and not callable(key):
key = partial(chunk.getitem, key=key)
return list(unique(seq, key=key))
def merge_distinct(seqs, key=None):
return chunk_distinct(toolz.concat(seqs), key=key)
def merge_frequencies(seqs):
if isinstance(seqs, Iterable):
seqs = list(seqs)
if not seqs:
return {}
first, rest = seqs[0], seqs[1:]
if not rest:
return first
out = defaultdict(int)
out.update(first)
for d in rest:
for k, v in d.items():
out[k] += v
return out
def bag_range(n, npartitions):
""" Numbers from zero to n
Examples
--------
>>> import dask.bag as db
>>> b = db.range(5, npartitions=2)
>>> list(b)
[0, 1, 2, 3, 4]
"""
size = n // npartitions
name = "range-%d-npartitions-%d" % (n, npartitions)
ijs = list(enumerate(take(npartitions, range(0, n, size))))
dsk = dict(((name, i), (reify, (range, j, min(j + size, n)))) for i, j in ijs)
if n % npartitions != 0:
i, j = ijs[-1]
dsk[(name, i)] = (reify, (range, j, n))
return Bag(dsk, name, npartitions)
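# Worked sketch of the partition boundaries built by ``bag_range`` above:
# every partition covers ``n // npartitions`` consecutive integers and the
# final partition absorbs any remainder. This is a hypothetical helper for
# illustration only.
def _example_range_boundaries(n, npartitions):
    size = n // npartitions
    starts = list(take(npartitions, range(0, n, size)))
    return [
        (j, n if i == npartitions - 1 else min(j + size, n))
        for i, j in enumerate(starts)
    ]
# e.g. _example_range_boundaries(5, 2) -> [(0, 2), (2, 5)], mirroring the
# partitions [0, 1] and [2, 3, 4] produced by db.range(5, npartitions=2).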
def bag_zip(*bags):
""" Partition-wise bag zip
All passed bags must have the same number of partitions.
NOTE: corresponding partitions should have the same length; if they do not,
the "extra" elements from the longer partition(s) will be dropped. If you
have this case, chances are that what you really need is a data alignment
mechanism like pandas's, and not a missing value filler like zip_longest.
Examples
--------
Correct usage:
>>> import dask.bag as db
>>> evens = db.from_sequence(range(0, 10, 2), partition_size=4)
>>> odds = db.from_sequence(range(1, 10, 2), partition_size=4)
>>> pairs = db.zip(evens, odds)
>>> list(pairs)
[(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
Incorrect usage:
>>> numbers = db.range(20) # doctest: +SKIP
>>> fizz = numbers.filter(lambda n: n % 3 == 0) # doctest: +SKIP
>>> buzz = numbers.filter(lambda n: n % 5 == 0) # doctest: +SKIP
>>> fizzbuzz = db.zip(fizz, buzz) # doctest: +SKIP
>>> list(fizzbuzz) # doctest: +SKIP
[(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)]
When what you really wanted was more along the lines of the following:
>>> list(fizzbuzz) # doctest: +SKIP
[(0, 0), (3, None), (None, 5), (6, None), (None, 10), (9, None),
(12, None), (15, 15), (18, None), (None, 20), (None, 25), (None, 30)]
"""
npartitions = bags[0].npartitions
assert all(bag.npartitions == npartitions for bag in bags)
# TODO: do more checks
name = "zip-" + tokenize(*bags)
dsk = dict(
((name, i), (reify, (zip,) + tuple((bag.name, i) for bag in bags)))
for i in range(npartitions)
)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)
return Bag(graph, name, npartitions)
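# Minimal sketch of the partition-wise semantics described in ``bag_zip``:
# zip is applied partition by partition, so unequal partition lengths are
# silently truncated. Plain lists stand in for bag partitions here; this
# helper is illustrative and not used by dask.
def _example_partitionwise_zip(partitions_a, partitions_b):
    return [list(zip(pa, pb)) for pa, pb in zip(partitions_a, partitions_b)]
# e.g. _example_partitionwise_zip([[0, 2], [4]], [[1, 3], [5, 7]])
# -> [[(0, 1), (2, 3)], [(4, 5)]] -- the trailing 7 is dropped.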
def map_chunk(f, iters, iter_kwarg_keys=None, kwargs=None):
"""Map ``f`` across one or more iterables, maybe with keyword arguments.
Low-level function used in ``bag_map``, not user facing.
Arguments
---------
f : callable
iters : List[Iterable]
iter_kwarg_keys : List[str] or None
Keyword names to use for pair with the tail end of ``iters``, allowing
keyword arguments to be passed in from iterators.
kwargs : dict or None
Additional constant keyword arguments to use on every call to ``f``.
"""
if kwargs:
f = partial(f, **kwargs)
iters = [iter(a) for a in iters]
return _MapChunk(f, iters, kwarg_keys=iter_kwarg_keys)
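# Hedged illustration of the pairing rule documented in ``map_chunk``: the
# last ``len(iter_kwarg_keys)`` iterables feed keyword arguments and the
# rest feed positional arguments, one element of each per call. The helper
# below is hypothetical and only exists to make that pairing concrete.
def _example_map_chunk_pairing():
    def f(x, y, scale=1):
        return (x + y) * scale
    out = map_chunk(f, [[1, 2], [10, 20], [2, 3]], iter_kwarg_keys=["scale"])
    return list(out)  # [(1 + 10) * 2, (2 + 20) * 3] == [22, 66]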
class _MapChunk(Iterator):
def __init__(self, f, iters, kwarg_keys=None):
self.f = f
self.iters = iters
self.kwarg_keys = kwarg_keys or ()
self.nkws = len(self.kwarg_keys)
def __next__(self):
try:
vals = [next(i) for i in self.iters]
except StopIteration:
self.check_all_iterators_consumed()
raise
if self.nkws:
args = vals[: -self.nkws]
kwargs = dict(zip(self.kwarg_keys, vals[-self.nkws :]))
return self.f(*args, **kwargs)
return self.f(*vals)
def check_all_iterators_consumed(self):
if len(self.iters) > 1:
for i in self.iters:
if isinstance(i, itertools.repeat):
continue
try:
next(i)
except StopIteration:
pass
else:
msg = (
"map called with multiple bags that aren't identically "
"partitioned. Please ensure that all bag arguments "
"have the same partition lengths"
)
raise ValueError(msg)
def starmap_chunk(f, x, kwargs):
if kwargs:
f = partial(f, **kwargs)
return itertools.starmap(f, x)
def unpack_scalar_dask_kwargs(kwargs):
"""Extracts dask values from kwargs.
Currently only ``dask.bag.Item`` and ``dask.delayed.Delayed`` are
supported. Returns a merged dask graph and a task resulting in a keyword
dict.
"""
kwargs2 = {}
dependencies = []
for k, v in kwargs.items():
vv, collections = unpack_collections(v)
if not collections:
kwargs2[k] = v
else:
kwargs2[k] = vv
dependencies.extend(collections)
if dependencies:
kwargs2 = (dict, (zip, list(kwargs2), list(kwargs2.values())))
return kwargs2, dependencies
def bag_map(func, *args, **kwargs):
"""Apply a function elementwise across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
*args, **kwargs : Bag, Item, Delayed, or object
Arguments and keyword arguments to pass to ``func``. Non-Bag args/kwargs
are broadcasted across all calls to ``func``.
Notes
-----
For calls with multiple `Bag` arguments, corresponding partitions should
have the same length; if they do not, the call will error at compute time.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(5), npartitions=2)
>>> b2 = db.from_sequence(range(5, 10), npartitions=2)
Apply a function to all elements in a bag:
>>> db.map(lambda x: x + 1, b).compute()
[1, 2, 3, 4, 5]
Apply a function with arguments from multiple bags:
>>> from operator import add
>>> db.map(add, b, b2).compute()
[5, 7, 9, 11, 13]
Non-bag arguments are broadcast across all calls to the mapped function:
>>> db.map(add, b, 1).compute()
[1, 2, 3, 4, 5]
Keyword arguments are also supported, and have the same semantics as
regular arguments:
>>> def myadd(x, y=0):
... return x + y
>>> db.map(myadd, b, y=b2).compute()
[5, 7, 9, 11, 13]
>>> db.map(myadd, b, y=1).compute()
[1, 2, 3, 4, 5]
Both arguments and keyword arguments can also be instances of
``dask.bag.Item`` or ``dask.delayed.Delayed``. Here we'll add the max value
in the bag to each element:
>>> db.map(myadd, b, b.max()).compute()
[4, 5, 6, 7, 8]
"""
name = "%s-%s" % (funcname(func), tokenize(func, "map", *args, **kwargs))
dsk = {}
dependencies = []
bags = []
args2 = []
for a in args:
if isinstance(a, Bag):
bags.append(a)
args2.append(a)
elif isinstance(a, (Item, Delayed)):
dependencies.append(a)
args2.append((itertools.repeat, a.key))
else:
args2.append((itertools.repeat, a))
bag_kwargs = {}
other_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, Bag):
bag_kwargs[k] = v
bags.append(v)
else:
other_kwargs[k] = v
other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)
dependencies.extend(collections)
if not bags:
raise ValueError("At least one argument must be a Bag.")
npartitions = {b.npartitions for b in bags}
if len(npartitions) > 1:
raise ValueError("All bags must have the same number of partitions.")
npartitions = npartitions.pop()
def build_iters(n):
args = [(a.name, n) if isinstance(a, Bag) else a for a in args2]
if bag_kwargs:
args.extend((b.name, n) for b in bag_kwargs.values())
return args
if bag_kwargs:
iter_kwarg_keys = list(bag_kwargs)
else:
iter_kwarg_keys = None
dsk = {
(name, n): (
reify,
(map_chunk, func, build_iters(n), iter_kwarg_keys, other_kwargs),
)
for n in range(npartitions)
}
# If all bags are the same type, use that type, otherwise fallback to Bag
return_type = set(map(type, bags))
return_type = return_type.pop() if len(return_type) == 1 else Bag
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)
return return_type(graph, name, npartitions)
def map_partitions(func, *args, **kwargs):
"""Apply a function to every partition across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
*args, **kwargs : Bag, Item, Delayed, or object
Arguments and keyword arguments to pass to ``func``.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(nums, den=1):
... return [num / den for num in nums]
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map_partitions(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map_partitions(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = "%s-%s" % (funcname(func), tokenize(func, "map-partitions", *args, **kwargs))
dsk = {}
dependencies = []
bags = []
args2 = []
for a in args:
if isinstance(a, Bag):
bags.append(a)
args2.append(a)
elif isinstance(a, (Item, Delayed)):
args2.append(a.key)
dependencies.append(a)
else:
args2.append(a)
bag_kwargs = {}
other_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, Bag):
bag_kwargs[k] = v
bags.append(v)
else:
other_kwargs[k] = v
other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)
dependencies.extend(collections)
if not bags:
raise ValueError("At least one argument must be a Bag.")
npartitions = {b.npartitions for b in bags}
if len(npartitions) > 1:
raise ValueError("All bags must have the same number of partitions.")
npartitions = npartitions.pop()
def build_args(n):
return [(a.name, n) if isinstance(a, Bag) else a for a in args2]
def build_bag_kwargs(n):
if not bag_kwargs:
return {}
return (
dict,
(zip, list(bag_kwargs), [(b.name, n) for b in bag_kwargs.values()]),
)
if kwargs:
dsk = {
(name, n): (
apply,
func,
build_args(n),
(merge, build_bag_kwargs(n), other_kwargs),
)
for n in range(npartitions)
}
else:
dsk = {(name, n): (func,) + tuple(build_args(n)) for n in range(npartitions)}
# If all bags are the same type, use that type, otherwise fallback to Bag
return_type = set(map(type, bags))
return_type = return_type.pop() if len(return_type) == 1 else Bag
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)
return return_type(graph, name, npartitions)
def _reduce(binop, sequence, initial=no_default):
if initial is not no_default:
return reduce(binop, sequence, initial)
else:
return reduce(binop, sequence)
def make_group(k, stage):
def h(x):
return x[0] // k ** stage % k
return h
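# Worked illustration of the grouping rule in ``make_group``: with branching
# factor ``k``, stage ``s`` buckets an input by the s-th base-k digit of its
# hash bucket, which is what drives the multi-stage task shuffle below. The
# helper is hypothetical and for illustration only.
def _example_stage_digits(bucket, k, stages):
    # e.g. _example_stage_digits(13, 4, 2) -> [1, 3], since 13 == 3 * 4 + 1
    return [bucket // k ** s % k for s in range(stages)]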
def groupby_tasks(b, grouper, hash=hash, max_branch=32):
max_branch = max_branch or 32
n = b.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch))) or 1
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k ** stages)]
b2 = b.map(partial(chunk.groupby_tasks_group_hash, hash=hash, grouper=grouper))
token = tokenize(b, grouper, hash, max_branch)
shuffle_join_name = "shuffle-join-" + token
shuffle_group_name = "shuffle-group-" + token
shuffle_split_name = "shuffle-split-" + token
start = {}
for idx, inp in enumerate(inputs):
group = {}
split = {}
if idx < b.npartitions:
start[(shuffle_join_name, 0, inp)] = (b2.name, idx)
else:
start[(shuffle_join_name, 0, inp)] = []
for stage in range(1, stages + 1):
_key_tuple = (shuffle_group_name, stage, inp)
group[_key_tuple] = (
groupby,
(make_group, k, stage - 1),
(shuffle_join_name, stage - 1, inp),
)
for i in range(k):
split[(shuffle_split_name, stage, i, inp)] = (
dict.get,
_key_tuple,
i,
{},
)
groups.append(group)
splits.append(split)
for stage in range(1, stages + 1):
join = dict(
(
(shuffle_join_name, stage, inp),
(
list,
(
toolz.concat,
[
(
shuffle_split_name,
stage,
inp[stage - 1],
insert(inp, stage - 1, j),
)
for j in range(k)
],
),
),
)
for inp in inputs
)
joins.append(join)
name = "shuffle-" + token
end = dict(
((name, i), (list, (dict.items, (groupby, grouper, (pluck, 1, j)))),)
for i, j in enumerate(join)
)
groups.extend(splits)
groups.extend(joins)
dsk = merge(start, end, *(groups))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b2])
return type(b)(graph, name, len(inputs))
def groupby_disk(b, grouper, npartitions=None, blocksize=2 ** 20):
if npartitions is None:
npartitions = b.npartitions
token = tokenize(b, grouper, npartitions, blocksize)
import partd
p = ("partd-" + token,)
dirname = config.get("temporary_directory", None)
if dirname:
file = (apply, partd.File, (), {"dir": dirname})
else:
file = (partd.File,)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, file))}
except AttributeError:
dsk1 = {p: (partd.Python, file)}
# Partition data on disk
name = "groupby-part-{0}-{1}".format(funcname(grouper), token)
dsk2 = dict(
((name, i), (partition, grouper, (b.name, i), npartitions, p, blocksize))
for i in range(b.npartitions)
)
# Barrier
barrier_token = "groupby-barrier-" + token
dsk3 = {barrier_token: (chunk.barrier,) + tuple(dsk2)}
# Collect groups
name = "groupby-collect-" + token
dsk4 = dict(
((name, i), (collect, grouper, i, p, barrier_token)) for i in range(npartitions)
)
dsk = merge(dsk1, dsk2, dsk3, dsk4)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])
return type(b)(graph, name, npartitions)
def empty_safe_apply(func, part, is_last):
if isinstance(part, Iterator):
try:
_, part = peek(part)
except StopIteration:
if not is_last:
return no_result
return func(part)
elif not is_last and len(part) == 0:
return no_result
else:
return func(part)
def empty_safe_aggregate(func, parts, is_last):
parts2 = (p for p in parts if p is not no_result)
return empty_safe_apply(func, parts2, is_last)
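# Hedged sketch of the tree aggregation wired up by ``Bag.reduction`` and
# ``Bag.foldby``: partial results are combined in groups of ``split_every``
# until a single value remains, e.g. 20 partials with split_every=8 collapse
# as 20 -> 3 -> 1. This in-memory helper is illustrative only; the real code
# builds the same shape as a task graph.
def _example_tree_aggregate(partials, aggregate, split_every=8):
    while len(partials) > 1:
        partials = [
            aggregate(partials[i:i + split_every])
            for i in range(0, len(partials), split_every)
        ]
    return partials[0]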
def safe_take(n, b, warn=True):
r = list(take(n, b))
if len(r) != n and warn:
warnings.warn(
"Insufficient elements for `take`. {0} elements "
"requested, only {1} elements available. Try passing "
"larger `npartitions` to `take`.".format(n, len(r))
)
return r
def random_sample(x, state_data, prob):
"""Filter elements of `x` by a probability `prob`.
Parameters
----------
x : iterable
state_data : tuple
A tuple that can be passed to ``random.Random.setstate``.
prob : float
A float between 0 and 1, representing the probability that each
element will be yielded.
"""
random_state = Random()
random_state.setstate(state_data)
for i in x:
if random_state.random() < prob:
yield i
def random_state_data_python(n, random_state=None):
"""Return a list of tuples that can be passed to
``random.Random.setstate``.
Parameters
----------
n : int
Number of tuples to return.
random_state : int or ``random.Random``, optional
If an int, is used to seed a new ``random.Random``.
"""
if not isinstance(random_state, Random):
random_state = Random(random_state)
maxuint32 = 1 << 32
return [
(
3,
tuple(random_state.randint(0, maxuint32) for i in range(624)) + (624,),
None,
)
for i in range(n)
]
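# Hedged usage sketch tying the two helpers above together: build one state
# tuple per partition with ``random_state_data_python``, then sample each
# partition independently with ``random_sample``. Plain lists stand in for
# bag partitions; the helper is illustrative only.
def _example_random_split(partitions, prob, seed=0):
    states = random_state_data_python(len(partitions), seed)
    return [
        list(random_sample(part, state, prob))
        for part, state in zip(partitions, states)
    ]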
def split(seq, n):
""" Split apart a sequence into n equal pieces.
>>> split(range(10), 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
"""
if not isinstance(seq, (list, tuple)):
seq = list(seq)
part = len(seq) / n
L = [seq[int(part * i) : int(part * (i + 1))] for i in range(n - 1)]
L.append(seq[int(part * (n - 1)) :])
return L
def to_dataframe(seq, columns, dtypes):
import pandas as pd
seq = reify(seq)
# pd.DataFrame expects lists, only copy if necessary
if not isinstance(seq, list):
seq = list(seq)
res = pd.DataFrame(seq, columns=list(columns))
return res.astype(dtypes, copy=False)
| bsd-3-clause |
hajicj/muscima | scripts/analyze_tracking_log.py | 1 | 16341 | #!/usr/bin/env python
"""``analyze_tracking_log.py`` is a script that performs a quick and dirty analysis
of a MUSCIMarker event log. It is not necessary for using the dataset,
but you might want to run it if you are annotating something with MUSCIMarker.
For an overview of command-line options, call::
analyze_tracking_log.py -h
What does the script track?
* Number of minutes/hours worked
* Speed: how much was done in total?
* Densities: frequency of events (calls) per minute/hour
Visualizations:
* Timing visualization
Also, convert to CSV, to make it grep-able? First: fixed-name cols,
then: args dict, formatted as key=value,key=value
"""
from __future__ import print_function, unicode_literals, division
from builtins import str
from builtins import range
import argparse
import codecs
import collections
import io
import itertools
import json
import logging
import numpy
import os
import pprint
import time
import matplotlib.pyplot as plt
import operator
from muscima.io import parse_cropobject_list
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
def freqdict(l, sort=True):
out = collections.defaultdict(int)
for item in l:
out[item] += 1
if sort:
s_out = collections.OrderedDict()
for k, v in sorted(list(out.items()), key=operator.itemgetter(1), reverse=True):
s_out[k] = v
out = s_out
return out
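# Hedged usage sketch (not called anywhere in this script): applying
# freqdict to a list of tracked event names yields a name -> count map,
# ordered from most to least frequent.
def example_freqdict_usage():
    events = ['add_cropobject', 'add_cropobject', 'undo']
    return freqdict(events) # OrderedDict([('add_cropobject', 2), ('undo', 1)])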
##############################################################################
def is_annotation_package(path):
"""Checks that the given path is an annotation package."""
if not os.path.isdir(path):
return False
subdirs = os.listdir(path)
if 'source_images' not in subdirs:
return False
if 'annotations' not in subdirs:
return False
if 'annotation_logs' not in subdirs:
return False
return True
def logs_from_package(package):
"""Collects all log file names (with complete paths) from the given package.
:param package: Path to the annotations package.
:return: List of filenames (full paths).
"""
logging.info('Collecting log files from package {0}'.format(package))
if not os.path.isdir(package):
raise OSError('Package {0} not found!'.format(package))
log_path = os.path.join(package, 'annotation_logs')
if not os.path.isdir(log_path):
raise ValueError('Package {0}: annotation_logs not found, probably not a package.'
''.format(package))
# Collect all log days
log_days = os.listdir(log_path)
# Dealing with people who copied the entire .muscimarker-tracking directory
# (potentially without the dot, as just "muscimarker-tracking")
if len(log_days) == 0:
logging.info('No logs in package {0}!'.format(package))
return []
if log_days[-1].endswith('muscimarker-tracking'):
log_path = os.path.join(log_path, log_days[-1])
log_days = os.listdir(log_path)
log_files = []
for day in log_days:
# .DS_store and other hidden files
if day.startswith('.'):
continue
# Dealing with people who copied only the JSON files
if day.endswith('json'):
logging.info('Found log file that is not inside a day dir: {0}'
''.format(day))
log_files.append(os.path.join(log_path, day))
continue
if day.endswith('xml'):
logging.info('Log file is for some reason XML instead of JSON; copied wrong files???')
continue
day_log_path = os.path.join(log_path, day)
day_log_files = [os.path.join(day_log_path, l)
for l in os.listdir(day_log_path)]
log_files += day_log_files
logging.info('In package {0}: found {1} log files.'
''.format(package, len(log_files)))
logging.debug('In package {0}: log files:\n{1}'
''.format(package, pprint.pformat(log_files)))
return log_files
def try_correct_crashed_json(fname):
"""Attempts to correct an incomplete JSON list file: if MUSCIMarker
crashed, the items list would not get correctly closed. We attempt
to remove the last comma and add a closing bracket (`]`) on a new
line instead, and return the object as a (unicode) string.
>>> json = '''
... [
... {'something': 'this', 'something': 'that'},'''
"""
with open(fname, 'r') as hdl:
lines = [l.rstrip() for l in hdl]
if lines[-1][-1] == ',':
logging.info('Correcting JSON: found hanging comma!')
lines[-1] = lines[-1][:-1]
lines.append(']')
return '\n'.join(lines)
else:
logging.info('No hanging comma, cannot deal with this situation.')
return None
def unique_logs(event_logs):
"""Checks that the event logs are unique using the start event
timestamp. Returns a list of unique event logs. If two have the same
timestamp, the first one is used.
For logging purposes, expects a dict of event logs. Keys are log file names,
values are the event lists.
"""
unique = collections.OrderedDict()
for log_file, l in event_logs.items():
if len(l) < 1:
logging.info('Got an empty log from file {0}'.format(log_file))
continue
init_event = l[0]
if '-time-' not in init_event:
raise ValueError('Got a non-event log JSON list, file {0}! Supposed init event: {1}'
''.format(log_file, init_event))
init_time = init_event['-time-']
if init_time in unique:
logging.info('Found non-unique event log {0} with timestamp {1} ({2} events)!'
' Using first ({3} events).'
''.format(log_file, init_time, len(l), len(unique[init_time])))
else:
unique[init_time] = l
return list(unique.values())
##############################################################################
# Counting results
def annotations_from_package(package):
"""Collect all annotation XML files (with complete paths)
from the given package."""
logging.info('Collecting annotation files from package {0}'.format(package))
if not os.path.isdir(package):
raise OSError('Package {0} not found!'.format(package))
annot_path = os.path.join(package, 'annotations')
if not os.path.isdir(annot_path):
raise ValueError('Package {0}: annotations not found, probably not a package.'
''.format(package))
# Collect all annotations
annotation_files = [os.path.join(annot_path, f)
for f in os.listdir(annot_path) if f.endswith('.xml')]
return annotation_files
def count_cropobjects(annot_file):
return len(parse_cropobject_list(annot_file))
def count_cropobjects_and_relationships(annot_file):
cropobjects = parse_cropobject_list(annot_file)
n_inlinks = 0
for c in cropobjects:
if c.inlinks is not None:
n_inlinks += len(c.inlinks)
return len(cropobjects), n_inlinks
##############################################################################
# Visualization
def events_by_time_units(events, seconds_per_unit=60):
"""Puts the events into bins that correspond to equally spaced
intervals of time. The length of time covered by one bin is
given by seconds_per_unit."""
# Get first event time
start_time = min([float(e['-time-']) for e in events])
# The events do not have to come in-order
bins = collections.defaultdict(list)
for e in events:
t = float(e['-time-'])
n_bin = int(t - start_time) // int(seconds_per_unit)
bins[n_bin].append(e)
return bins
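# Hypothetical helper (not used by main()): summarize the bins produced by
# events_by_time_units as an events-per-minute count, which is the quantity
# behind the density statistics printed by this script.
def example_events_per_minute(events):
    bins = events_by_time_units(events, seconds_per_unit=60)
    return {minute: len(evts) for minute, evts in sorted(bins.items())}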
def plot_events_by_time(events, type_key='-fn-'):
"""Simple scatterplot visualization.
All events are expected to have a -fn- component."""
fns = [e['-fn-'] for e in events]
# Assign numbers to tracked fns
fns_by_freq = {f: len([e for e in fns if e == f]) for f in set(fns)}
fn_dict = {f: i for i, f in enumerate(sorted(list(fns_by_freq.keys()),
reverse=True,
key=lambda k: fns_by_freq[k]))}
min_time = float(events[0]['-time-'])
dataset = numpy.zeros((len(events), 2))
for i, e in enumerate(events):
dataset[i][0] = float(e['-time-']) - min_time
dataset[i][1] = fn_dict[e[type_key]]
# Now visualize
plt.scatter(dataset[:,0], dataset[:,1])
def format_as_timeflow_csv(events, delimiter='\t'):
"""There is a cool offline visualization tool caled TimeFlow,
which has a timeline app. It needs a pretty specific CSV format
to work, though."""
# What we need:
# - ID
# - Date (human?)
# - The common fields:
min_second = int(min([float(e['-time-']) for e in events]))
def format_date(e):
# return '-'.join(reversed(time_human.replace(':', '-').split('__')))
# time_human = e['-time-human-']
time = float(e['-time-'])
return str(int(time) - min_second)
# Collect all events that are in the data.
event_fields = freqdict(list(itertools.chain(*[list(e.keys()) for e in events])))
output_fields = ['ID', 'Date'] + list(event_fields.keys())
n_fields = len(output_fields)
field2idx = {f: i+2 for i, f in enumerate(event_fields.keys())}
event_table = [['' for _ in range(n_fields)] for _ in events]
for i, e in enumerate(events):
event_table[i][0] = str(i)
event_table[i][1] = format_date(e)#format_date(e['-time-human-'])
for k, v in e.items():
event_table[i][field2idx[k]] = v
# Add labels to event table to get the complete data
# that should be formatted as TSV
output_data = [output_fields] + event_table
output_lines = ['\t'.join(row) for row in output_data]
output_string = '\n'.join(output_lines)
return output_string
##############################################################################
def build_argument_parser():
parser = argparse.ArgumentParser(description=__doc__, add_help=True,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--inputs', nargs='+', action='store',
help='Log files to be analyzed.')
parser.add_argument('-p', '--packages', nargs='+', action='store',
help='Annotation package. If set, will pull'
' all log files in the package.')
parser.add_argument('-a', '--annotator', action='store',
help='Annotator. If set, will pull all log files'
' from all packages in the given person\'s'
' annotation directory')
parser.add_argument('-c', '--count_annotations', action='store_true',
help='If given, will collect annotation files from the'
' supplied packages (or per-annotator packages)'
' and compute object/rel counts and efficiency statistics.')
parser.add_argument('--no_training', action='store_true',
help='If given, will ignore packages with "training" in their name.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Turn on INFO messages.')
parser.add_argument('--debug', action='store_true',
help='Turn on DEBUG messages.')
return parser
def main(args):
logging.info('Starting main...')
_start_time = time.clock()
if args.annotator is not None:
logging.info('Collecting annotation packages for annotator {0}'
''.format(args.annotator))
# Collect all packages, incl. training
packages = []
for d in os.listdir(args.annotator):
package_candidate = os.path.join(args.annotator, d)
if not is_annotation_package(package_candidate):
continue
packages.append(package_candidate)
logging.info('Found: {0} packages'.format(len(packages)))
args.packages = packages
if args.packages is not None:
logging.info('Collecting log files for {0} packages.'.format(len(args.packages)))
log_files = []
for package in args.packages:
current_log_files = logs_from_package(package)
log_files += current_log_files
logging.info('Found: {0} log files'.format(len(log_files)))
args.inputs = log_files
log_data_per_file = {}
for input_file in args.inputs:
if not os.path.isfile(input_file):
raise ValueError('Log file {0} not found!'.format(input_file))
current_log_data = []
with codecs.open(input_file, 'r', 'utf-8') as hdl:
try:
current_log_data = json.load(hdl)
except ValueError:
logging.info('Could not parse JSON file {0}'.format(input_file))
logging.info('Attempting to correct file.')
corrected = try_correct_crashed_json(input_file)
if corrected is not None:
logging.info('Attempting to parse corrected JSON.')
try:
current_log_data = json.loads(corrected)
except ValueError:
logging.warning('Could not even parse corrected JSON, skipping file {0}.'.format(input_file))
#raise
logging.info('Success!')
else:
logging.info('Unable to correct JSON, skipping file.')
log_data_per_file[input_file] = current_log_data
logging.info('Checking logs for uniqueness. Started with {0} log files.'
''.format(len(log_data_per_file)))
log_data_per_file = unique_logs(log_data_per_file)
logging.info('After uniqueness check: {0} logs left.'.format(len(log_data_per_file)))
log_data = [e for e in itertools.chain(*log_data_per_file)]
if len(log_data) == 0:
print('Received no log data! Skipping ahead to count annotations.')
n_minutes = None
n_hours = None
else:
logging.info('Parsed {0} data items.'.format(len(log_data)))
# Your code goes here
# raise NotImplementedError()
# Frequency by -fn-:
freq_by_fn = freqdict([l.get('-fn-', None) for l in log_data])
by_minute = events_by_time_units(log_data)
by_minute_freq = {k: len(v) for k, v in list(by_minute.items())}
n_minutes = len(by_minute)
print('# minutes worked: {0}'.format(n_minutes))
n_hours = n_minutes / 60.0
print('# hours worked: {0:.2f}'.format(n_hours))
print('CZK@120: {0:.3f}'.format(n_hours * 120))
print('CZK@150: {0:.3f}'.format(n_hours * 150))
print('CZK@180: {0:.3f}'.format(n_hours * 180))
print('Avg. events per minute: {0}'.format(float(len(log_data)) / n_minutes))
if args.count_annotations:
if args.packages is None:
raise ValueError('Cannot count annotations if no packages are given!')
n_cropobjects = 0
n_relationships = 0
for package in args.packages:
annot_files = annotations_from_package(package)
for f in annot_files:
n_c, n_r = count_cropobjects_and_relationships(f)
n_cropobjects += n_c
n_relationships += n_r
print('Total CropObjects: {0}'.format(n_cropobjects))
print('Total Relationships: {0}'.format(n_relationships))
if n_minutes is not None:
print('Cropobjects per minute: {0:.2f}'.format(n_cropobjects / float(n_minutes)))
_end_time = time.clock()
logging.info('analyze_tracking_log.py done in {0:.3f} s'.format(_end_time - _start_time))
##############################################################################
if __name__ == '__main__':
parser = build_argument_parser()
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
if args.debug:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
main(args)
| mit |
futurulus/scipy | scipy/stats/_binned_statistic.py | 26 | 17723 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : array_like
A sequence of values to be binned.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First a basic example:
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled', alpha=0.2,
... label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, xy = binned_statistic_dd([x], values, statistic,
bins, range)
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
return BinnedStatisticResult(medians, edges[0], xy)
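# A minimal usage sketch (added for illustration, not part of the original module): any
# callable that maps a 1-D array of values to a single number can be passed as
# `statistic`.  The helper name `_example_binned_range` is hypothetical.
def _example_binned_range():
    x = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5])
    values = np.array([1.0, 4.0, 0.5, 1.5, 3.0, 2.5])
    # peak-to-peak spread of `values` inside each of 3 equal-width bins over x
    stat, edges, binnumber = binned_statistic(
        x, values, statistic=lambda v: v.max() - v.min(), bins=3)
    return stat, edges, binnumber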
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin
x_edges : (nx + 1) ndarray
The bin edges along the first dimension.
y_edges : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as `values`.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
.. versionadded:: 0.11.0
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
bins, range)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
return BinnedStatistic2dResult(medians, edges[0], edges[1], xy)
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as x.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
np.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D * [None]
dedges = D * [None]
try:
M = len(bins)
if M != D:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(D)
smax = np.zeros(D)
for i in np.arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in np.arange(D):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(D):
Ncount[i] = np.digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
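    # For example (illustration added here, not in the original source):
    # np.digitize([1.0, 2.0], [1.0, 2.0]) returns [1, 2]; without the shift below,
    # the sample equal to the right edge (2.0) would land in the outlier bin.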
for i in np.arange(D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal)
== np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
xy = np.zeros(N, int)
for i in np.arange(0, D - 1):
xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
xy += Ncount[ni[-1]]
result = np.empty(nbin.prod(), float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
a = flatcount.nonzero()
result[a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
flatsum2 = np.bincount(xy, values ** 2)
a = flatcount.nonzero()
result[a] = np.sqrt(flatsum2[a] / flatcount[a]
- (flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(xy, None)
a = np.arange(len(flatcount))
result[a] = flatcount
elif statistic == 'sum':
result.fill(0)
flatsum = np.bincount(xy, values)
a = np.arange(len(flatsum))
result[a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(xy):
result[i] = np.median(values[xy == i])
elif callable(statistic):
with warnings.catch_warnings():
            # NumPy generates warnings for mean/std/... when given an empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(xy):
result[i] = statistic(values[xy == i])
# Shape into a proper matrix
result = result.reshape(np.sort(nbin))
for i in np.arange(nbin.size):
j = ni.argsort()[i]
result = result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D * [slice(1, -1)]
result = result[core]
if (result.shape != nbin - 2).any():
raise RuntimeError('Internal Shape Error')
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
return BinnedStatisticddResult(result, edges, xy)
| bsd-3-clause |
wwf5067/statsmodels | statsmodels/sandbox/examples/try_quantile_regression.py | 33 | 1302 | '''Example to illustrate Quantile Regression
Author: Josef Perktold
'''
import numpy as np
from statsmodels.compat.python import zip
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
sige = 5
nobs, k_vars = 500, 5
x = np.random.randn(nobs, k_vars)
#x[:,0] = 1
y = x.sum(1) + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
exog = np.column_stack((np.ones(nobs), x))
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.25)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
##print 'ols ', res_ols.params
##print '0.25', res_qr2
##print '0.5 ', res_qr
##print '0.75', res_qr3
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
#sortidx = np.argsort(y)
fitted_ols = np.dot(res_ols.model.exog, params[0])
sortidx = np.argsort(fitted_ols)
x_sorted = res_ols.model.exog[sortidx]
fitted_ols = np.dot(x_sorted, params[0])
plt.figure()
plt.plot(y[sortidx], 'o', alpha=0.75)
for lab, beta in zip(['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75'], params):
print('%-8s'%lab, np.round(beta, 4))
fitted = np.dot(x_sorted, beta)
lw = 2 if lab == 'ols' else 1
plt.plot(fitted, lw=lw, label=lab)
plt.legend()
plt.show()
| bsd-3-clause |
Nathx/parental_advisory_ml | scripts/target_engineering.py | 2 | 3352 | # This script experiments with word2vec to cluster the target based on MPAA
# descriptions.
from gensim.models import Word2Vec
import logging
import seaborn as sns
import pandas as pd
import numpy as np
import regex as re
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from pymongo import MongoClient
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from matplotlib.cm import ScalarMappable
from sklearn.decomposition import PCA
from datetime import datetime
import string
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def tokenize(reason):
reason = re.sub(ur'[^\P{P}-]+', ' ', reason.lower()).split()
if 'for' in reason:
reason = reason[reason.index('for')+1:]
elif len(reason) < 3:
reason = []
else:
reason = reason[2:]
return [w for w in reason if not w in (stop + list(string.punctuation))]
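# Illustrative example (an assumption, not from the original script): for an MPAA reason
# such as "Rated R for strong violence and language", tokenize keeps only the content
# words after "for" and drops stopwords, yielding ['strong', 'violence', 'language'].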
def clean_data(coll):
target_df = pd.DataFrame(list(coll.find()))
clustering = target_df[['RATING', 'REASON']]
clustering = clustering[clustering.REASON != ''].drop(938)
return clustering
def build_w2b_mat(filename, vocab):
    model = Word2Vec.load_word2vec_format(filename, binary=False)
w2v_mat = np.zeros((len(model[vocab[0]]), len(vocab)))
for j, word in enumerate(vocab):
w2v_mat[:, j] = model[word]
return w2v_mat
def build_dendrogram(w2v_mat):
sim_vec = pdist(w2v_mat.T, 'cosine')
sim_mat = squareform(sim_vec)
link_mat = linkage(sim_mat, method='complete', metric='cosine')
fig = plt.figure(figsize=(10,10))
d = dendrogram(link_mat, truncate_mode='level', color_threshold=.09, orientation='left',
labels=vocab,
leaf_font_size=10)
return d
def build_wordmap(w2v_mat):
pca = PCA(n_components=2)
pca.fit(w2v_mat.T)
w2v_pca = pca.transform(w2v_mat.T)
km = KMeans(n_clusters=6)
    labels = km.fit_predict(w2v_mat.T)
colors = 255 * ScalarMappable(cmap='Paired').to_rgba(np.unique(labels))[:, :3]
hex_colors = ['#%02x%02x%02x' % (r, g, b) for r,g,b in colors]
sns.set_style('dark')
fig, ax = plt.subplots(1,1, figsize=(1.5,1.5))
ax.axis('off')
# ax = fig.add_subplot(111)
    for i in range(w2v_pca.shape[0]):
        plt.text(w2v_pca[i, 0], w2v_pca[i, 1], str(vocab[i]),
fontdict={'color': hex_colors[labels[i]], 'size': 12})
return ax
if __name__ == '__main__':
# pull data
client = MongoClient()
db = client.movies
coll = db.film_ratings
clustering = clean_data(coll)
print datetime.now(), "Data loaded."
stop = stopwords.words('english')
vectorizer = TfidfVectorizer(tokenizer=tokenize, max_df=.99, min_df=.01)
tfidf = vectorizer.fit_transform(clustering.REASON.values)
vocab = vectorizer.get_feature_names()
print datetime.now(), "TFIDF built."
filename = 'GloVe-1.2/vectors.txt'
w2v_mat = build_w2b_mat(filename, vocab)
fig, axes = plt.subplots(2,1, figsize=(10,15))
axes[0] = build_dendrogram(w2v_mat)
print datetime.now(), "Dendrogram generated."
axes[1] = build_wordmap(w2v_mat)
print datetime.now(), "PCA generated."
fig.savefig('images/plot_' + filename)
print datetime.now(), "File saved."
| mit |
knights-lab/NINJA-SHOGUN | shogun/scripts/old/shogun_bt2_functional.py | 1 | 5408 | #!/usr/bin/env python
"""
Copyright 2015-2017 Knights Lab, Regents of the University of Minnesota.
This software is released under the GNU Affero General Public License (AGPL) v3.0 License.
"""
import click
from collections import defaultdict
import os
import pandas as pd
from cytoolz import valmap, valfilter
import csv
from ninja_utils.utils import find_between
from ninja_utils.utils import verify_make_dir
from dojo.taxonomy import NCBITree
from dojo.taxonomy.maps import IMGMap
from shogun.wrappers import bowtie2_align
from shogun.parsers import yield_alignments_from_sam_inf
def build_img_ncbi_map(align_gen, lca, img_map):
"""
Given a generator for SAM file, return a dictionary with QNAME as the key
and (IMG IDs: list, LCA NCBI ID: int) as the value.
:param align_gen:
:param lca:
:param img_map:
:return:
"""
lca_map = defaultdict(lambda: [set(), None])
for qname, rname in align_gen:
img_id = int(rname.split('_')[0])
if qname in lca_map:
current_rname = lca_map[qname][1]
new_taxon = img_map(img_id)
if current_rname and new_taxon:
if current_rname != new_taxon:
lca_map[qname][1] = lca(current_rname, new_taxon)
else:
lca_map[qname][1] = img_map(img_id)
lca_map[qname][0].add(rname)
return lca_map
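# Sketch of the returned structure (inferred from the code above, names hypothetical):
# lca_map maps each query name to [set_of_reference_names, lca_ncbi_taxon_id], e.g.
#   hits, taxon = lca_map['read_42']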
@click.command()
@click.option('-i', '--input', type=click.Path(), default=os.getcwd(),
help='Directory containing the input FASTA files with ".fna" extensions (default=cwd)')
@click.option('-o', '--output', type=click.Path(), default=os.path.join(os.getcwd(), 'shogun_bt2_lca_out'),
help='Output directory for the results')
@click.option('-b', '--bt2_indx', help='Path to the bowtie2 index')
@click.option('-x', '--extract_ncbi_tid', default='ncbi_tid|,|',
help='Characters that sandwich the NCBI TID in the reference FASTA (default="ncbi_tid|,|")')
@click.option('-p', '--threads', type=click.INT, default=1, help='The number of threads to use (default=1)')
def shogun_functional(input, output, bt2_indx, extract_ncbi_tid, threads):
verify_make_dir(output)
basenames = [os.path.basename(filename)[:-4] for filename in os.listdir(input) if filename.endswith('.fna')]
# Create a SAM file for each input FASTA file
for basename in basenames:
fna_inf = os.path.join(input, basename + '.fna')
sam_outf = os.path.join(output, basename + '.sam')
if os.path.isfile(sam_outf):
print("Found the samfile \"%s\". Skipping the alignment phase for this file." % sam_outf)
else:
print(bowtie2_align(fna_inf, sam_outf, bt2_indx, num_threads=threads))
img_map = IMGMap()
for basename in basenames:
sam_inf = os.path.join(output, basename + '.sam')
step_outf = 'test'
if os.path.isfile(step_outf):
print("Found the \"%s.kegg.csv\". Skipping the LCA phase for this file." % step_outf)
else:
lca_map = build_img_ncbi_map(yield_alignments_from_sam_inf(sam_inf), )
sam_files = [os.path.join(args.input, filename) for filename in os.listdir(args.input) if filename.endswith('.sam')]
img_map = IMGMap()
ncbi_tree = NCBITree()
lca = LCA(ncbi_tree, args.depth)
with open(args.output, 'w') if args.output else sys.stdout as outf:
csv_outf = csv.writer(outf, quoting=csv.QUOTE_ALL, lineterminator='\n')
csv_outf.writerow(['sample_id', 'sequence_id', 'ncbi_tid', 'img_id'])
for file in sam_files:
with open(file) as inf:
lca_map = build_lca_map(yield_alignments_from_sam_inf(inf), lca, img_map)
for key in lca_map:
img_ids, ncbi_tid = lca_map[key]
csv_outf.writerow([os.path.basename(file).split('.')[0], key, ncbi_tid, ','.join(img_ids)])
if run_lca:
tree = NCBITree()
rank_name = list(tree.lineage_ranks.keys())[depth - 1]
if not rank_name:
raise ValueError('Depth must be between 0 and 7, it was %d' % depth)
begin, end = extract_ncbi_tid.split(',')
counts = []
for basename in basenames:
sam_file = os.path.join(output, basename + '.sam')
lca_map = {}
for qname, rname in yield_alignments_from_sam_inf(sam_file):
ncbi_tid = int(find_between(rname, begin, end))
if qname in lca_map:
current_ncbi_tid = lca_map[qname]
if current_ncbi_tid:
if current_ncbi_tid != ncbi_tid:
lca_map[qname] = tree.lowest_common_ancestor(ncbi_tid, current_ncbi_tid)
else:
lca_map[qname] = ncbi_tid
if annotate_lineage:
lca_map = valmap(lambda x: tree.green_genes_lineage(x, depth=depth), lca_map)
taxon_counts = Counter(filter(None, lca_map.values()))
else:
lca_map = valfilter(lambda x: tree.get_rank_from_taxon_id(x) == rank_name, lca_map)
taxon_counts = Counter(filter(None, lca_map.values()))
counts.append(taxon_counts)
df = pd.DataFrame(counts, index=basenames)
df.T.to_csv(os.path.join(output, 'taxon_counts.csv'))
if __name__ == '__main__':
shogun_functional()
| mit |
anirudhjayaraman/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
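# A minimal usage sketch (an assumption, not part of the original module): it presumes
# MLCOMP_DATASETS_HOME points at an unzipped MLComp dump containing the "20news-18828"
# dataset.  The helper name `_example_load_20news` is hypothetical.
def _example_load_20news():
    news_train = load_mlcomp('20news-18828', 'train')
    # the Bunch follows the load_files convention: filenames, target, target_names, ...
    return news_train.filenames[:5], news_train.target_names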
| bsd-3-clause |
waterponey/scikit-learn | examples/covariance/plot_outlier_detection.py | 36 | 5023 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates four
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
  data are Gaussian distributed and performs better than the One-Class SVM
  in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points' colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
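        # With outliers_fraction = 0.25 this is the 25th percentile of the decision
        # scores, so the contour drawn at `threshold` below leaves roughly the
        # lowest-scoring quarter of the points outside the inlier region.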
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
yotamfr/prot2vec | src/python/mongoscripts.py | 1 | 12877 | import datetime
from xml.etree.ElementTree import fromstring
import pandas as pd
import os
import requests
from Bio import SeqIO
from Bio.UniProt import GOA
from parse import parse
from pymongo import MongoClient
from tqdm import tqdm
from xmljson import badgerfish as bf
import utils
import parameters as params
from models import EcodDomain
args = params.arguments
logger = utils.get_logger("mongoscripts")
client = MongoClient('mongodb://localhost:27017/')
dbname = args["db"]
db = client[dbname]
def parse_int(x):
try:
return int(x)
except ValueError:
return "NA"
def parse_float(x):
try:
return float(x)
except ValueError:
return "NA"
def parse_list(x):
return x.strip().split(' | ')
def parse_bool(x):
if x == "yes":
return True
elif x == "no":
return False
def get_GO_terms(pdb_id):
pdb, chain = pdb_id[:4], pdb_id[4:]
req = requests.get('http://www.rcsb.org/pdb/rest/goTerms?structureId=%s.%s' % (pdb, chain))
if req.status_code != 200: # then assume it's a .cif
raise requests.HTTPError('HTTP Error %s' % req.status_code)
data = bf.data(fromstring(req.content))['goTerms']
return [] if 'term' not in data else data['term']
def load_ecod_sequences(start=db.ecod.count({})):
numseq = 0
filename = args["ecod_fasta"]
logger.info("Countig ECOD sequences.")
fasta_sequences = SeqIO.parse(open(filename), 'fasta')
for _ in fasta_sequences: numseq += 1
logger.info("Loading %s ECOD sequences to %s ..." % (numseq, dbname))
fasta_sequences = SeqIO.parse(open(filename), 'fasta')
for _ in tqdm(range(numseq), desc="sequences processed"):
fasta = next(fasta_sequences)
ecod = EcodDomain(header=fasta.description, sequence=fasta.seq)
db.ecod.update_one({"_id": ecod.eid}, {
"$set": {
"uid": ecod.uid,
"complex": ecod.pdb,
"chain": ecod.chain,
"num": int(ecod.num),
"ecod_id": ecod.eid,
"sequence": str(ecod.seq),
"hierarchy": ecod.hierarchy,
"loci": [{"chain": loc.chain, "start": loc.start, "end": loc.end} for loc in ecod.loci],
}
}, upsert=True)
logger.info("\nFinished!")
# def load_pdb_sequences(collection, filename, start=None, fetch_go=False):
#
# numseq = 0
# if not start: start = collection.count({}) + 5
# logger.info("Countig PDB sequences.")
# fasta_sequences = SeqIO.parse(open(filename), 'fasta')
# for _ in fasta_sequences: numseq += 1
# logger.info("Loading %s PDB sequences to %s ..." % (numseq, dbname))
# fasta_sequences = SeqIO.parse(open(filename), 'fasta')
#
# formatC = "{pdb_id}{:s}{:w}{:s}{seq_length:INT}{:s}{method}{:s}{resolution:FLOAT}{:s}{r_val_f:FLOAT}" \
# "{:s}{r_val_w:FLOAT} yes{desc}<{uniprot_str}>{:s}[{organism}]"
# formatD = "{pdb_id}{:s}{:w}{:s}{seq_length:INT}{:s}{method}{:s}{resolution:FLOAT}{:s}{r_val_f:FLOAT}" \
# "{:s}{r_val_w:FLOAT} no{desc}<{uniprot_str}>{:s}[{organism}]"
#
# uniprot_format = "{uid}({start:INT}-{end:INT})"
#
# for i in tqdm(range(numseq), desc="sequences processed"):
#
# fasta = next(fasta_sequences)
#
# if i < start: continue
# d = None
#
# if ' ||' in fasta.description:
# desc, dup = fasta.description.split(' ||')
# else:
# desc, dup = fasta.description, None
#
# if not d: d = parse(formatC, desc,
# dict(INT=parse_int, FLOAT=parse_float, BOOL=parse_bool, LIST=parse_list))
# if not d: d = parse(formatD, desc,
# dict(INT=parse_int, FLOAT=parse_float, BOOL=parse_bool, LIST=parse_list))
# if not d: continue
#
# descriptors = d["desc"].strip().split(' | ')
#
# uniprot = None if d["uniprot_str"] == "NA" else \
# [parse(uniprot_format, u, dict(INT=parse_int)) if '(' in u
# else {"uid": u, "start": -1, "end": -1} for u in d["uniprot_str"].split(' | ')]
#
# assert d["pdb_id"] == fasta.id
#
# terms = [] if not fetch_go else get_GO_terms(fasta.id)
#
# collection.update_one({"_id": fasta.id}, {
# "$set": {
# "pdb_id": d["pdb_id"],
# "complex": d["pdb_id"][:4],
# "chain": d["pdb_id"][4:],
# "sequence": str(fasta.seq),
# "seq_length": d["seq_length"],
# "method": d["method"],
# "resolution": d["resolution"],
# "r_val_free": d["r_val_f"],
# "r_val_work": d["r_val_w"],
# "uniprot": [] if not uniprot
# else [{
# "uid": u["uid"],
# "start": u["start"],
# "end": u["end"]
# } for u in uniprot],
# "organism": d["organism"],
# "goTerms":
# [{"goid": t['@id'],
# "ontology": t['detail']['@ontology'],
# "name": t['detail']['@name'],
# "definition": t['detail']['@definition']
# } for t in terms],
# "descriptors": descriptors,
# "duplicates": [] if not dup else dup.split(' ')
# }
# }, upsert=True)
#
# logger.info("\nFinished!")
# def load_pdb_goa(start=db.goa_pdb.count({})): # load GOA in a flat structure
#
# logger.info("Countig GeneOntology annotations ...")
# numannots = 0
# filename = args["pdb_gaf"]
#
# with open(filename, 'r') as handler:
# goa = GOA.gafiterator(handler)
# for line in goa:
# numannots += 1
# logger.info("Loading %s GO annotations..." % numannots)
# with open(filename, 'r') as handler:
# goa = GOA.gafiterator(handler)
# for i in tqdm(range(numannots), desc="annotations already processed"):
#
# data = next(goa)
#
# if i < start: continue
#
# date = datetime.datetime.strptime(data['Date'], "%Y%m%d").date()
# assert data["DB_Object_ID"] == data["DB_Object_Symbol"]
#
# pdb = data["DB_Object_ID"][:4]
# chain = data["DB_Object_ID"][5:]
# json = {
# "PDB_ID": pdb+chain,
# "Entry_ID": pdb,
# "Chain": chain,
# "DB_Object_ID": data['DB_Object_ID'],
# "With": data['With'],
# "Assigned_By": data["Assigned_By"],
# "Annotation_Extension": data['Annotation_Extension'],
# "Gene_Product_Form_ID": data['Gene_Product_Form_ID'],
# "DB:Reference": data['DB:Reference'],
# "GO_ID": data['GO_ID'],
# "Qualifier": data['Qualifier'],
# "Date": datetime.datetime.fromordinal(date.toordinal()),
# "DB": data['DB'],
# "created_at": datetime.datetime.utcnow(),
# "DB_Object_Name": data['DB_Object_Name'],
# "DB_Object_Type": data['DB_Object_Type'],
# "Evidence": data['Evidence'],
# "Taxon_ID": data['Taxon_ID'],
# "Aspect": data['Aspect']
# }
# db.goa_pdb.update_one( {
# "_id": i}, {
# '$set': json
# }, upsert=True)
#
# logger.info("\nFinished!")
def add_single_uniprot(fasta):
header, sequence = fasta.id, str(fasta.seq)
dbname, UniqueIdentifier, EntryName = header.split(' ')[0].split('|')
prot = {
"primary_accession": UniqueIdentifier,
"db": dbname,
"entry_name": EntryName,
"sequence": sequence,
"length": len(sequence),
"created_at": datetime.datetime.utcnow(),
"header": header
}
db.uniprot.update_one({
"_id": UniqueIdentifier}, {
"$set": prot
}, upsert=True)
def load_uniprot(src_fasta, start=db.uniprot.count({})): # http://www.uniprot.org/help/fasta-headers
numseq = 0
logger.info("Countig Uniprot sequences.")
fasta_sequences = SeqIO.parse(open(src_fasta), 'fasta')
for _ in fasta_sequences:
numseq += 1
logger.info("\nLoading %s Uniprot sequences to %s ...\n" % (numseq, dbname))
fasta_sequences = SeqIO.parse(open(src_fasta), 'fasta')
for i in tqdm(range(numseq), desc="sequences already processed"):
if i < start:
continue
add_single_uniprot(next(fasta_sequences))
logger.info("\nFinished!")
# def load_entire_intact():
#
# df = pd.read_table(args["intact_all"], sep='\t', low_memory=True)
# logger.info("reading %s entries from %s ." % (len(df.index), args["intact_all"]))
# it = df.iterrows()
# for i in tqdm(range(len(df.index)), desc="identifiers already processed"):
# _, row = next(it)
# json = row.to_dict()
# for oldkey in json.keys():
# if " " in oldkey:
# newkey = "_".join(oldkey.split(" "))
# json[newkey] = json[oldkey]
# if "|" in str(json[oldkey]):
# json[newkey] = json[oldkey].split("|")
# del json[oldkey]
#
# db.intact.update_one({
# "_id": i}, {
# '$set': json
# }, upsert=True)
# def load_entire_biogrid():
#
# df = pd.read_table(args["biogrid_ids"], sep='\t', low_memory=True, skiprows=range(27))
# logger.info("reading %s entries from %s ." % (len(df.index), args["biogrid_ids"]))
# it = df.iterrows()
# for i in tqdm(range(len(df.index)), desc="identifiers already processed"):
# _, row = next(it)
# json = row.to_dict()
#
# if json["IDENTIFIER_TYPE"] == "SWISS-PROT" \
# or json["IDENTIFIER_TYPE"] == "UNIPROT-ACCESSION":
#
# db.biogrid_ids.update_one({
# "_id": i}, {
# '$set': json
# }, upsert=True)
#
# for filename in os.listdir(args["biogrid_organism"]):
# filepath = "%s/%s" % (args["biogrid_organism"], filename)
# df = pd.read_table(filepath, sep='\t', low_memory=False)
# logger.info("reading %s entries from %s ." % (len(df.index), filename))
# for i, row in df.iterrows():
# json = row.to_dict()
# for oldkey in json.keys():
# if " " in oldkey:
# newkey = "_".join(oldkey.split(" "))
# json[newkey] = json[oldkey]
# if "|" in str(json[oldkey]):
# json[newkey] = json[oldkey].split("|")
# del json[oldkey]
#
# json["Organism_Name"] = filename.split("-")[2]
#
# db.biogrid.update_one({
# "_id": json['#BioGRID_Interaction_ID']}, {
# '$set': json
# }, upsert=True)
def load_entire_goa(src_gpa, start=db.goa_uniprot.count({})): # load GOA in a flat structure
logger.info("Countig GeneOntology annotations ...")
numannots = 0
with open(src_gpa, 'r') as handler:
goa = GOA._gpa11iterator(handler)
for _ in goa:
numannots += 1
logger.info("\nLoading %s GO annotations to %s ...\n" % (numannots, dbname))
with open(src_gpa, 'r') as handler:
goa = GOA._gpa11iterator(handler)
for i in tqdm(range(numannots), desc="annotations already processed"):
data = next(goa)
date = datetime.datetime.strptime(data['Date'], "%Y%m%d").date()
if i < start:
continue
json = {
"DB_Object_ID": data["DB_Object_ID"],
"Annotation_Properties": data['Annotation_Properties'],
"With": data['With'],
"Interacting_taxon_ID": data['Interacting_taxon_ID'],
"DB:Reference": data['DB:Reference'],
"Annotation_Extension": data['Annotation Extension'],
"Assigned_by": data['Assigned_by'],
"GO_ID": data['GO_ID'],
"ECO_Evidence_code": data['ECO_Evidence_code'],
"Qualifier": data['Qualifier'],
"Date": datetime.datetime.fromordinal(date.toordinal()),
"DB": data['DB'],
"created_at": datetime.datetime.utcnow()
}
db.goa_uniprot.update_one({
"_id": i}, {
'$set': json
}, upsert=True)
def main():
# load_uniprot(args["uniprot_sprot_fasta"], 0)
# load_uniprot(args["uniprot_trembl_fasta"], 0)
load_entire_goa(args["goa_uniprot_all"], 385500000)
# load_cafa3_training(None, args["cafa3_sprot_goa"])
if __name__ == "__main__":
main()
| mit |
rs2/pandas | pandas/tests/window/test_ewm.py | 1 | 4087 | import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import DataFrame, DatetimeIndex, Series, date_range
import pandas._testing as tm
from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
def test_constructor(which):
c = which.ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError, match=msg):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError, match=msg):
c(alpha=0.5, span=1.5)
# not valid: com < 0
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
c(com=-0.5)
# not valid: span < 1
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
c(span=0.5)
# not valid: halflife <= 0
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
msg = "alpha must satisfy: 0 < alpha <= 1"
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError, match=msg):
c(alpha=alpha)
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(method):
# see gh-12811
e = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
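def test_ewm_com_span_alpha_equivalence():
    # Sketch added for illustration (not part of the original test module): com, span and
    # alpha describe the same smoothing when alpha = 1 / (1 + com) = 2 / (span + 1).
    s = Series(np.arange(10, dtype=float))
    expected = s.ewm(alpha=0.5).mean()
    tm.assert_series_equal(s.ewm(com=1.0).mean(), expected)
    tm.assert_series_equal(s.ewm(span=3.0).mean(), expected)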
def test_ewma_times_not_datetime_type():
msg = r"times must be datetime64\[ns\] dtype."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(5))
def test_ewma_times_not_same_length():
msg = "times must be the same length as the object."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]"))
def test_ewma_halflife_not_correct_type():
msg = "halflife must be a string or datetime.timedelta object"
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))
def test_ewma_halflife_without_times(halflife_with_times):
msg = "halflife can only be a timedelta convertible argument if times is not None."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=halflife_with_times)
@pytest.mark.parametrize(
"times",
[
np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"),
date_range("2000", freq="D", periods=10),
date_range("2000", freq="D", periods=10).tz_localize("UTC"),
"time_col",
],
)
@pytest.mark.parametrize("min_periods", [0, 2])
def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
halflife = halflife_with_times
data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean()
expected = df.ewm(halflife=1.0, min_periods=min_periods).mean()
tm.assert_frame_equal(result, expected)
def test_ewma_with_times_variable_spacing(tz_aware_fixture):
tz = tz_aware_fixture
halflife = "23 days"
times = DatetimeIndex(
["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"]
).tz_localize(tz)
data = np.arange(3)
df = DataFrame(data)
result = df.ewm(halflife=halflife, times=times).mean()
expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
deculler/MachineLearningTables | ml_table.py | 1 | 30116 | from datascience import Table
from numbers import Number
import numpy as np
import matplotlib.pyplot as plots
from matplotlib import cm
from mpl_toolkits.mplot3d.axes3d import Axes3D
from sklearn import linear_model, neighbors, discriminant_analysis
from matplotlib import colors
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plots.cm.register_cmap(cmap=cmap)
# Regression objects that provide a simple functional abstraction
# retain scikit_learn functionality
# and the R perspective
class Regression():
"""Container for KNN clasifiers object."""
def __init__(self, obj, model,
source_table=None, output_label=None, input_labels=None):
self.obj = obj
self.model = model
self.source_table = source_table
self.input_labels = input_labels
self.output_label = output_label
class Linear(Regression):
"""Container for Linear regression object, ordinary and Ridge."""
def __init__(self, obj, params, model,
source_table=None, output_label=None, input_labels=None):
Regression.__init__(self, obj, model, source_table, output_label, input_labels)
self.params = params
def summary(self):
b0, bs = self.params
sum_tbl = Table().with_columns([("Param", ['Intercept']+self.input_labels),
("Coefficient", [b0]+list(bs)),
])
sum_tbl['Std Error'] = self.source_table.SE_params(self.output_label, (b0, bs))
sum_tbl['t-statistic'] = sum_tbl['Coefficient'] / sum_tbl['Std Error']
sum_tbl['95% CI'] = [(b-2*se, b+2*se) for b,se in zip(sum_tbl['Coefficient'], sum_tbl['Std Error'])]
sum_tbl['99% CI'] = [(b-3*se, b+3*se) for b,se in zip(sum_tbl['Coefficient'], sum_tbl['Std Error'])]
return sum_tbl
class Logit(Regression):
"""Container for Logistic regression object."""
def __init__(self, obj, params, model,
source_table=None, output_label=None, input_labels=None):
Regression.__init__(self, obj, model, source_table, output_label, input_labels)
self.params = params
def likelihood(self, *args):
b0, bs = self.params
e = np.exp(b0 + np.sum(bs*args))
return e/(1 + e)
def summary(self):
b0, bs = self.params
sum_tbl = Table().with_columns([("Param", ['Intercept']+self.input_labels),
("Coeffient", [b0]+list(bs)),
])
return sum_tbl
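# Usage sketch (illustration added here): for a fitted Logit with params (b0, bs),
# likelihood(x1, ..., xp) evaluates the logistic function
#   exp(b0 + bs.x) / (1 + exp(b0 + bs.x)),
# i.e. the modelled probability of the positive class in a binary fit.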
class Knn(Regression):
"""Container for KNN clasifiers object."""
def __init__(self, obj, model,
source_table=None, output_label=None, input_labels=None):
Regression.__init__(self, obj, model, source_table, output_label, input_labels)
class LinearDA(Regression):
"""Container for Logistic regression object."""
def __init__(self, obj, params, model,
source_table=None, output_label=None, input_labels=None):
Regression.__init__(self, obj, model, source_table, output_label, input_labels)
self.params = params
######################################
class ML_Table(Table):
"""Table with ML operators defined"""
def __init__(self, *args, **kwargs):
Table.__init__(self, *args, **kwargs)
@classmethod
def from_table(cls, tbl):
ml_tbl = ML_Table()
for label in tbl.labels:
ml_tbl[label] = tbl[label]
return ml_tbl
# Utilities
def _input_labels(self, output_label, input_labels=None):
if input_labels is None:
return [lbl for lbl in self.labels if lbl != output_label]
else:
return self._as_labels(input_labels)
# Column generators
@classmethod
def sequence(cls, label, n, low=0, high=1):
"""Generate a table is a labeled column containing an
arithmetic sequence from low to high of length n."""
return ML_Table().with_column(label, np.arange(low, high, (high-low)/n))
@classmethod
def rnorm(cls, label, n, mean=0, sd=1, seed=None):
"""Generate a table is a labeled column containing a random normal sequence of length n."""
if seed is not None:
np.random.seed(seed)
return ML_Table().with_column(label, np.random.normal(loc=mean, scale=sd, size=n))
@classmethod
def runiform(cls, label, n, lo=-1, hi=1, seed=None):
"""Generate a table with a labeled column containing a uniform random sequence of length n over [lo, hi)."""
if seed is not None:
np.random.seed(seed)
return ML_Table().with_column(label, np.random.rand(n)*(hi-lo) + lo)
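    # Usage sketch (illustration with hypothetical labels): build a noisy linear data set
    #   data = ML_Table.runiform('x', 100, 0, 10, seed=0)
    #   data['y'] = 2.0 * data['x'] + ML_Table.rnorm('e', 100, sd=1, seed=1)['e']
    # and then fit it, e.g. with data.linear_regression('y').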
# Descriptive Statistics
def summary(self, ops=None):
"""Generate a table corresponding to the R summary operator."""
def FirstQu(x):
return np.percentile(x, 25)
def ThirdQu(x):
            return np.percentile(x, 75)
if ops is None:
ops=[min, FirstQu, np.median, np.mean, ThirdQu, max]
return self.stats(ops=ops)
# Regression methods for data fitting - 1d special case
def regression_1d_params(self, Y_label_or_column, x_label_or_column):
"""Return parameters of a linear model of f(x) = Y."""
x_values = self._get_column(x_label_or_column)
Y_values = self._get_column(Y_label_or_column)
m, b = np.polyfit(x_values, Y_values, 1)
return b, m
def regression_1d(self, Y_label_or_column, x_label_or_column):
"""Return a function that is a linear model of f(x) = Y."""
b, m = self.regression_1d_params(Y_label_or_column, x_label_or_column)
return lambda x: m*x + b
# Regression methods for data fitting
def poly_params(self, Y_label, x_label, degree):
"""Return a function that is a polynomial model of f(x) = Y."""
return np.polyfit(self[x_label], self[Y_label], degree)
def poly(self, Y_label, x_label, degree):
"""Return a function that is a polynomial model of f(x) = Y."""
coefs = self.poly_params(Y_label, x_label, degree)
def model(x):
psum = coefs[0]
for c in coefs[1:]:
psum = x*psum + c
return psum
return model
def _regression_obj(self, method, output_label, input_labels=None, **kwargs):
"""Generic pattern of sklearn classifier and regression usage."""
input_labels = self._input_labels(output_label, input_labels)
input_tbl = self.select(input_labels)
regressor = method(**kwargs)
regressor.fit(input_tbl.rows, self[output_label])
return regressor
def linear_regression(self, output_label, input_labels=None, **kwargs):
"""Return a linear regression function trained on a ML_Table.
For kwargs and documentation see
"""
input_labels = self._input_labels(output_label, input_labels)
obj = self._regression_obj(linear_model.LinearRegression,
output_label, input_labels, **kwargs)
params = obj.intercept_, obj.coef_
model = lambda *args: obj.predict([args])[0]
return Linear(obj, params, model, self, output_label, input_labels)
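    # Usage sketch (illustration with hypothetical labels):
    #   fit = tbl.linear_regression('y')   # regress 'y' on all other columns
    #   fit.params                         # (intercept, array of coefficients)
    #   fit.model(x1, x2)                  # predict the output at one input point
    #   fit.summary()                      # coefficient table with std errors and CIs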
def ridge_regression(self, output_label, input_labels=None, **kwargs):
"""Return a linear ridge regression function trained on a ML_Table.
For kwargs and documentation see
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html
class sklearn.linear_model.Ridge(alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)[source]¶
"""
input_labels = self._input_labels(output_label, input_labels)
obj = self._regression_obj(linear_model.Ridge,
output_label, input_labels, **kwargs)
params = obj.intercept_, obj.coef_
model = lambda *args: obj.predict([args])[0]
return Linear(obj, params, model, self, output_label, input_labels)
def knn_regression(self, output_label, input_labels=None, **kwargs):
"""Return a knn function trained on a ML_Table.
For kwargs and documentation see
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html
5.25.4 sklearn.neighbors.KNeighborsRegressor
class sklearn.neighbors.KNeighborsRegressor(n_neighbors=5, weights=’uniform’,
algorithm=’auto’, leaf_size=30, p=2, metric=’minkowski’,
metric_params=None, n_jobs=1, **kwargs)
"""
input_labels = self._input_labels(output_label, input_labels)
obj = self._regression_obj(neighbors.KNeighborsRegressor,
output_label, input_labels, **kwargs)
model = lambda *args: obj.predict([args])[0]
return Knn(obj, model, self, output_label, input_labels)
def logit_regression(self, output_label, input_labels=None, **kwargs):
"""Return a logistic regression function trained on a ML_Table.
For kwargs and documentation see
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression
class sklearn.linear_model.LogisticRegression(penalty=’l2’, dual=False, tol=0.0001,
C=1.0, fit_intercept=True, intercept_scaling=1,
class_weight=None,
random_state=None, solver=’liblinear’,
max_iter=100, multi_class=’ovr’, verbose=0,
warm_start=False, n_jobs=1)
"""
input_labels = self._input_labels(output_label, input_labels)
logit_obj = self._regression_obj(linear_model.LogisticRegression,
output_label, input_labels, **kwargs)
logit_params = logit_obj.intercept_[0], logit_obj.coef_[0]
logit_model = lambda *args: logit_obj.predict([args])[0]
return Logit(logit_obj, logit_params, logit_model, self, output_label, input_labels)
def LDA(self, output_label, input_labels=None, **kwargs):
"""Return a linear discriminant analysis trained on a ML_Table.
For kwargs and documentation see
http://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html
class sklearn.discriminant_analysis.LinearDiscriminantAnalysis(solver='svd',
shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=0.0001)
This version is assuming 1 feature
"""
input_labels = self._input_labels(output_label, input_labels)
lda_obj = self._regression_obj(discriminant_analysis.LinearDiscriminantAnalysis,
output_label, input_labels, **kwargs)
lda_params = lda_obj.intercept_[0], lda_obj.coef_[0]
lda_model = lambda *args: lda_obj.predict([args])[0]
return LinearDA(lda_obj, lda_params, lda_model, self, output_label, input_labels)
# Statistics used in assessing models and understanding data
def Cor_coef(self, x_column_or_label, y_column_or_label):
"""Computer the correlation coefficient between two columns."""
x_values = self._get_column(x_column_or_label)
y_values = self._get_column(y_column_or_label)
x_res = x_values - np.mean(x_values)
y_res = y_values - np.mean(y_values)
return np.sum(x_res * y_res) / (np.sqrt(np.sum(x_res**2)) * np.sqrt(sum(y_res**2)))
def Cor(self):
"""Create a correlation matrix of numeric columns as a table."""
assert(self.num_rows > 0)
num_labels = [lbl for lbl in self.labels if isinstance(self[lbl][0], Number)]
tbl = self.select(num_labels)
Cor_tbl = Table().with_column("Param", num_labels)
for lbl in num_labels:
Cor_tbl[lbl] = [self.Cor_coef(lbl, xlbl) for xlbl in num_labels]
return Cor_tbl
def TSS(self, y_column_or_label):
"""Calulate the total sum of squares of observation column y."""
y_values = self._get_column(y_column_or_label)
y_mean = np.mean(y_values)
return np.sum((y_values-y_mean)**2)
def RSS(self, y_column_or_label, f_column_or_label):
"""Calulate the residual sum of square of observations y from estimate f."""
y_values = self._get_column(y_column_or_label)
f_values = self._get_column(f_column_or_label)
return np.sum((y_values - f_values)**2)
def MSE(self, y_column_or_label, f_column_or_label):
"""Calulate the mean squared error of a observations y from estimate f."""
return self.RSS(y_column_or_label, f_column_or_label)/self.num_rows
def RSE(self, y_column_or_label, f_column_or_label):
"""Computer the residual standard error of estimate f."""
return np.sqrt(self.RSS(y_column_or_label, f_column_or_label)/(self.num_rows - 2))
def R2(self, y_column_or_label, f_column_or_label):
"""Calulate the R^2 statistic of estimate of output y from f ."""
return 1 - (self.RSS(y_column_or_label, f_column_or_label)/self.TSS(y_column_or_label))
def F_stat(self, y_column_or_label, f_column_or_label, p):
"""Calulate the F-statistic of estimate f over p parameters ."""
f_values = self._get_column(f_column_or_label)
n = len(f_values)
rss = self.RSS(y_column_or_label, f_values)
tss = self.TSS(y_column_or_label)
return ((tss-rss)/p) / (rss/(n - p - 1))
def leverage_1d(self, x_column_or_label):
"""Calulate the 1d leverage statistic of an input column."""
x_values = self._get_column(x_column_or_label)
x_mean = np.mean(x_values)
x_ss = np.sum((x_values - x_mean)**2)
return ((x_values - x_mean)**2)/x_ss + (1/len(x_values))
# Category density
def classification_error(self, y_column_or_label, f_column_or_label):
y_values = self._get_column(y_column_or_label)
f_values = self._get_column(f_column_or_label)
correct = np.count_nonzero(y_values == f_values)
return (len(y_values)-correct)/len(y_values)
def density(self, output_label, input_label, bins=20, counts=False):
"""Generate a table containing the density of of inputs for each
key in a categorical output.
"""
cat_counts = self.pivot_bin(output_label, input_label, bins=bins, normed=False)
cat_counts.relabel('bin', input_label)
if counts:
return cat_counts
totals = [np.sum(row[1:]) for row in cat_counts.rows]
cat_density = cat_counts.select(input_label)
for label in cat_counts.labels[1:]:
cat_density[label] = cat_counts[label]/totals
return cat_density
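    # Usage sketch (illustration with hypothetical labels):
    #   tbl.density('species', 'petal_width', bins=10)
    # returns one row per input bin and one column of densities per category of 'species'.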
# Common statistics from model functions - 1D special case
def RSS_model_1d(self, y_column_or_label, model_fun, x_column_or_label):
f_values = model_fun(self._get_column(x_column_or_label))
return self.RSS(y_column_or_label, f_values)
def R2_model_1d(self, y_column_or_label, model_fun, x_column_or_label):
f_values = model_fun(self._get_column(x_column_or_label))
return self.R2(y_column_or_label, f_values)
def SE_1d_params(self, y_column_or_label, x_column_or_label, model = None):
"""Return the Standard Error of the parameters for a 1d regression."""
x_values = self._get_column(x_column_or_label)
x_mean = np.mean(x_values)
x_dif_sq = np.sum((x_values - x_mean)**2)
n = self.num_rows
if model is None:
model = self.regression_1d(y_column_or_label, x_values)
sigma_squared = (self.RSS_model_1d(y_column_or_label, model, x_values))/(n-2)
SE_b0_squared = sigma_squared*(1/n + (x_mean**2)/x_dif_sq) # constant term
SE_b1_squared = sigma_squared/x_dif_sq # linear term
return np.sqrt(SE_b0_squared), np.sqrt(SE_b1_squared)
def lm_summary_1d(self, y_column_or_label, x_label):
b0, b1 = self.regression_1d_params(y_column_or_label, x_label)
r_model = lambda x: b0 + x*b1
SE_b0, SE_b1 = self.SE_1d_params(y_column_or_label, x_label, r_model)
sum_tbl = Table().with_column('Param', ['intercept', x_label])
sum_tbl['Coefficient'] = [b0, b1]
sum_tbl['Std Error'] = (SE_b0, SE_b1)
sum_tbl['t-statistic'] = np.array([b0, b1])/sum_tbl['Std Error']
sum_tbl['95% CI'] = [(b0-2*SE_b0, b0+2*SE_b0), (b1-2*SE_b1, b1+2*SE_b1)]
sum_tbl['99% CI'] = [(b0-3*SE_b0, b0+3*SE_b0), (b1-3*SE_b1, b1+3*SE_b1)]
return sum_tbl
# Common statistics from model functions - general case
def f_values(self, output_label, model_fun, input_labels=None):
input_labels = self._input_labels(output_label, input_labels)
return [model_fun(*row) for row in self.select(input_labels).rows]
def classification_error_model(self, output_label, model_fun, input_labels=None):
"""Compute the residual sum of squares (RSS) for a model on a table."""
f_values = self.f_values(output_label, model_fun, input_labels)
return self.classification_error(output_label, f_values)
def RSS_model(self, output_label, model_fun, input_labels=None):
"""Compute the residual sum of squares (RSS) for a model on a table."""
f_values = self.f_values(output_label, model_fun, input_labels)
return self.RSS(output_label, f_values)
def R2_model(self, output_label, model_fun, input_labels=None):
"""Compute R^2 statistic for a model of a table.
"""
f_values = self.f_values(output_label, model_fun, input_labels)
return self.R2(output_label, f_values)
def F_model(self, output_label, model_fun, input_labels=None):
"""Compute f-statistic for a model of a table.
"""
p = len(self._input_labels(output_label)) if input_labels is None else 1
f_values = self.f_values(output_label, model_fun, input_labels)
return self.F_stat(output_label, f_values, p)
def RSE_model(self, output_label, model_fun, input_labels=None):
f_values = self.f_values(output_label, model_fun, input_labels)
return self.RSE(output_label, f_values)
def MSE_model(self, output_label, model_fun, input_labels=None):
f_values = self.f_values(output_label, model_fun, input_labels)
return self.MSE(output_label, f_values)
def LOOCV_model(self, output_label, method, input_labels=None):
"""Computer the leave out one cross validation of a modeling method applied to
a training table."""
n = self.num_rows
MSEs = [loo.MSE_model(output_label, loo.method(output_label, input_labels).model,
input_labels) for loo in [self.exclude(i) for i in range(n)]
]
return np.sum(MSEs)/n
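    # Usage sketch (illustration, assuming `method` is an unbound ML_Table method):
    #   tbl.LOOCV_model('y', ML_Table.linear_regression)
    #   tbl.LOOCV_model('y', ML_Table.knn_regression)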
def SE_params(self, output_label, params, input_labels=None):
"""Return the standard error of the parameters of a regression."""
if input_labels is None:
input_labels = self._input_labels(output_label)
Y = self[output_label] # Response vector
n = len(Y) # Number of points
        p = len(input_labels)               # Number of parameters
# Design matrix
X = np.array([np.append([1], row) for row in self.select(input_labels).rows])
b0, slopes = params
b = np.append([b0], slopes) # slope vector
residual = np.dot(X, b) - Y
sigma2 = np.sum(residual**2)/(n-p-1)
        # variance-covariance matrix of the coefficient estimates
std_err_matrix = sigma2*np.linalg.inv(np.dot(np.transpose(X), X))
coef_std_err = [np.sqrt(std_err_matrix[i,i]) for i in range(len(std_err_matrix))]
return coef_std_err
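    # The matrix inverted above is the usual OLS variance-covariance estimate,
    #   Var(b_hat) = sigma_hat^2 * (X^T X)^(-1),  sigma_hat^2 = RSS / (n - p - 1),
    # and the returned standard errors are the square roots of its diagonal.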
def lm_summary(self, output_label):
intercept, slopes = self.regression_params(output_label)
mdl = ML_Table._make_model(intercept, slopes)
input_labels = [lbl for lbl in self.labels if not lbl == output_label]
sum_tbl = Table().with_column('Param', ['Intercept'] + input_labels)
sum_tbl['Coefficient'] = [intercept] + list(slopes)
sum_tbl['Std Error'] = self.SE_params(output_label, (intercept, slopes))
sum_tbl['t-statistic'] = sum_tbl['Coefficient'] / sum_tbl['Std Error']
sum_tbl['95% CI'] = [(b-2*se, b+2*se) for b,se in zip(sum_tbl['Coefficient'], sum_tbl['Std Error'])]
sum_tbl['99% CI'] = [(b-3*se, b+3*se) for b,se in zip(sum_tbl['Coefficient'], sum_tbl['Std Error'])]
return sum_tbl
def lm_fit(self, output_label, model_fun, x_column_or_label=None):
if x_column_or_label is None:
input_labels = [lbl for lbl in self.labels if not lbl == output_label]
f_values = [model_fun(*row) for row in self.select(input_labels).rows]
p = len(input_labels)
else:
f_values = model_fun(self._get_column(x_column_or_label))
p = 1
fit_tbl = Table(["Quantity", "Value"])
return fit_tbl.with_rows([("Residual standard error", self.RSE(output_label, f_values)),
("R^2", self.R2(output_label, f_values)),
("F-statistic", self.F_stat(output_label, f_values, p))])
# Visualization
def _plot_contour(f, x_lo, x_hi, y_lo, y_hi, n=20, **kwargs):
"""Helper to form contour plot of a function over a 2D domain."""
x_step = (x_hi - x_lo)/n
y_step = (y_hi - y_lo)/n
x_range = np.arange(x_lo, x_hi, x_step)
y_range = np.arange(y_lo, y_hi, y_step)
X, Y = np.meshgrid(x_range, y_range)
Z = [[f(x,y) for x in x_range] for y in y_range]
fig, ax = plots.subplots()
CS = ax.contour(X, Y, Z, **kwargs)
ax.clabel(CS, inline=2, fontsize=10)
ax.grid(c='k', ls='-', alpha=0.3)
return ax
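    # Minimal sketch of how this helper is invoked (mirroring the internal call
    # sites below); any f(x, y) is evaluated on an n-by-n grid over the rectangle:
    #   ax = ML_Table._plot_contour(lambda x, y: x**2 + y**2, -1, 1, -1, 1, n=30)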
def RSS_contour(self, Y_column_or_label, x_column_or_label, scale=1,
sensitivity=0.1, n_grid=20, **kwargs):
"""Show contour of RSS around the regression point."""
b0, b1 = self.regression_1d_params(Y_column_or_label, x_column_or_label)
x_values = self._get_column(x_column_or_label)
rss_fun = lambda b0,b1:self.RSS(Y_column_or_label, b0 + b1*x_values)*scale
x_lo, x_hi = b0*(1-sensitivity), b0*(1+sensitivity)
y_lo, y_hi = b1*(1-sensitivity), b1*(1+sensitivity)
ax = ML_Table._plot_contour(rss_fun, x_lo, x_hi, y_lo, y_hi, n = n_grid, **kwargs)
ax.plot([b0], [b1], 'ro')
return ax
def RSS_contour2(self, output_label,
x_sensitivity=0.1, y_sensitivity = 0.1, scale=1,
n_grid=20, **kwargs):
"""Show contour of RSS around the 2-input regression point."""
b0, coefs = self.linear_regression(output_label).params
b1, b2 = coefs
print(b0, b1, b2)
x_lbl, y_lbl = self._input_labels(output_label)
x_values, y_values = self[x_lbl], self[y_lbl]
rss_fun = lambda b1,b2:self.RSS(output_label, b0 + b1*x_values + b2*y_values)*scale
x_lo, x_hi = b1*(1-x_sensitivity), b1*(1+x_sensitivity)
y_lo, y_hi = b2*(1-y_sensitivity), b2*(1+y_sensitivity)
ax = ML_Table._plot_contour(rss_fun, x_lo, x_hi, y_lo, y_hi, n = n_grid, **kwargs)
ax.plot([b1], [b2], 'ro')
ax.set_xlabel(x_lbl)
ax.set_ylabel(y_lbl)
return ax
def _plot_wireframe(f, x_lo, x_hi, y_lo, y_hi, n=20, rstride=1, cstride=1,
x_label=None, y_label=None, z_label=None):
x_step = (x_hi - x_lo)/n
y_step = (y_hi - y_lo)/n
x_range = np.arange(x_lo, x_hi, x_step)
y_range = np.arange(y_lo, y_hi, y_step)
X, Y = np.meshgrid(x_range, y_range)
Z = [[f(x,y) for x in x_range] for y in y_range]
fig = plots.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, Z, rstride=rstride, cstride=cstride, linewidth=1, color='b')
if x_label is not None:
ax.set_xlabel(x_label)
        if y_label is not None:
            ax.set_ylabel(y_label)
        if z_label is not None:
            ax.set_zlabel(z_label)
return ax
def RSS_wireframe(self, Y_column_or_label, x_column_or_label,
sensitivity=0.1, n_grid=20):
"""Show wireframe of RSS surface around the regression point."""
b0, b1 = self.regression_1d_params(Y_column_or_label, x_column_or_label)
x_values = self._get_column(x_column_or_label)
rss_fun = lambda b0,b1:self.RSS(Y_column_or_label, b0 + b1*x_values)
x_lo, x_hi = b0*(1-sensitivity), b0*(1+sensitivity)
y_lo, y_hi = b1*(1-sensitivity), b1*(1+sensitivity)
ax = ML_Table._plot_wireframe(rss_fun, x_lo, x_hi, y_lo, y_hi, n=n_grid)
ax.scatter([b0], [b1], [rss_fun(b0, b1)], c='r')
return ax
def plot_fit_1d(self, y_label, x_label, model_fun, n_mesh=50, xmin=None, xmax=None,
width=6, height=4, connect=True, **kwargs):
"""Visualize the error in f(x) = y + error."""
fig, ax = plots.subplots(figsize=(width, height))
ax.scatter(self[x_label], self[y_label])
f_tbl = self.select([x_label, y_label]).sort(x_label, descending=False)
if model_fun is not None:
if xmin is None:
xmin = min(self[x_label])
if xmax is None:
xmax = max(self[x_label])
xstep = (xmax-xmin)/n_mesh
xv = np.arange(xmin, xmax + xstep, xstep)
fun_x = [model_fun(x) for x in xv]
ax.plot(xv, fun_x, **kwargs)
if connect:
for i in range(f_tbl.num_rows):
ax.plot([f_tbl[x_label][i], f_tbl[x_label][i]],
[model_fun(f_tbl[x_label][i]), f_tbl[y_label][i] ], 'r-')
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
return ax
def plot_fit_2d(self, z_label, x_label, y_label, model_fun=None, n_mesh=50,
xmin=None, xmax=None, ymin=None, ymax=None,
connect=True,
rstride=5, cstride=5, width=6, height=4, **kwargs):
fig = plots.figure(figsize=(width, height))
ax = fig.add_subplot(111, projection='3d')
if model_fun is not None:
if xmin is None:
xmin = min(self[x_label])
if xmax is None:
xmax = max(self[x_label])
if ymin is None:
ymin = min(self[y_label])
if ymax is None:
ymax = max(self[y_label])
xstep = (xmax-xmin)/n_mesh
ystep = (ymax-ymin)/n_mesh
xv = np.arange(xmin, xmax + xstep, xstep)
yv = np.arange(ymin, ymax + ystep, ystep)
X, Y = np.meshgrid(xv, yv)
Z = [[model_fun(x,y) for x in xv] for y in yv]
            ax.plot_surface(X, Y, Z, rstride=rstride, cstride=cstride, linewidth=1, cmap=cm.coolwarm)
            ax.plot_wireframe(X, Y, Z, rstride=rstride, cstride=cstride, linewidth=1, color='b', **kwargs)
if connect:
for (x, y, z) in zip(self[x_label], self[y_label], self[z_label]):
mz = model_fun(x,y)
ax.plot([x,x], [y,y], [z,mz], color='black')
ax.scatter(self[x_label], self[y_label], self[z_label], c='r', marker='o')
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(z_label)
return ax
def plot_fit(self, f_label, model_fun, width=6, height=4, **kwargs):
"""Visualize the goodness of fit of a model."""
labels = [lbl for lbl in self.labels if not lbl == f_label]
assert len(labels) <= 2, "Too many dimensions to plot"
if len(labels) == 1:
return self.plot_fit_1d(f_label, labels[0], model_fun, **kwargs)
else:
return self.plot_fit_2d(f_label, labels[0], labels[1], model_fun,
width=width, height=height, **kwargs)
def _plot_color(f, x_lo, x_hi, y_lo, y_hi, n=20, **kwargs):
"""Helper to form colormap of a function over a 2D domain."""
x_step = (x_hi - x_lo)/n
y_step = (y_hi - y_lo)/n
x_range = np.arange(x_lo, x_hi, x_step)
y_range = np.arange(y_lo, y_hi, y_step)
X, Y = np.meshgrid(x_range, y_range)
Z = [[f(x,y) for x in x_range] for y in y_range]
fig, ax = plots.subplots()
ax.pcolormesh(X, Y, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
CS = ax.contour(X, Y, Z, [0.5], **kwargs)
ax.grid(c='k', ls='-', alpha=0.3)
return ax
def plot_cut_2d(self, cat_label, x_label, y_label, model_fun=None, n_grid=50,
xmin=None, xmax=None, ymin=None, ymax=None, **kwargs):
if xmin is None:
xmin = min(self[x_label])
if xmax is None:
xmax = max(self[x_label])
if ymin is None:
ymin = min(self[y_label])
if ymax is None:
ymax = max(self[y_label])
ax = ML_Table._plot_color(model_fun, xmin, xmax, ymin, ymax, n_grid, **kwargs)
categories = np.unique(self[cat_label])
colors = plots.cm.nipy_spectral(np.linspace(0, 1, len(categories)))
for cat, color in zip(categories, colors):
ax.scatter(self.where(cat_label, cat)[x_label],
self.where(cat_label, cat)[y_label], color=color)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.legend(categories, loc=2, bbox_to_anchor=(1.05, 1))
return ax
# Cross validation
| bsd-2-clause |
DGrady/pandas | pandas/tests/reshape/test_concat.py | 2 | 78048 | from warnings import catch_warnings
import dateutil
import numpy as np
from numpy.random import randn
from datetime import datetime
from pandas.compat import StringIO, iteritems
import pandas as pd
from pandas import (DataFrame, concat,
read_csv, isna, Series, date_range,
Index, Panel, MultiIndex, Timestamp,
DatetimeIndex)
from pandas.util import testing as tm
from pandas.util.testing import (assert_frame_equal,
makeCustomDataframe as mkdf)
import pytest
class ConcatenateBase(object):
def setup_method(self, method):
self.frame = DataFrame(tm.getSeriesData())
self.mixed_frame = self.frame.copy()
self.mixed_frame['foo'] = 'bar'
class TestConcatAppendCommon(ConcatenateBase):
"""
Test common dtype coercion rules between concat and append.
"""
def setup_method(self, method):
dt_data = [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')]
tz_data = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')]
td_data = [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]
period_data = [pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M')]
self.data = {'bool': [True, False, True],
'int64': [1, 2, 3],
'float64': [1.1, np.nan, 3.3],
'category': pd.Categorical(['X', 'Y', 'Z']),
'object': ['a', 'b', 'c'],
'datetime64[ns]': dt_data,
'datetime64[ns, US/Eastern]': tz_data,
'timedelta64[ns]': td_data,
'period[M]': period_data}
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, pd.Index):
if label == 'bool':
assert obj.dtype == 'object'
else:
assert obj.dtype == label
elif isinstance(obj, pd.Series):
if label.startswith('period'):
assert obj.dtype == 'object'
else:
assert obj.dtype == label
else:
raise ValueError
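    # As encoded by the assertions above (a statement about the pandas version
    # these tests target, not a general claim): a bool-valued Index is held with
    # object dtype, and a Series of Period values is likewise reported as object.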
def test_dtypes(self):
# to confirm test case covers intended dtypes
for typ, vals in iteritems(self.data):
self._check_expected_dtype(pd.Index(vals), typ)
self._check_expected_dtype(pd.Series(vals), typ)
def test_concatlike_same_dtypes(self):
# GH 13660
for typ1, vals1 in iteritems(self.data):
vals2 = vals1
vals3 = vals1
if typ1 == 'category':
exp_data = pd.Categorical(list(vals1) + list(vals2))
exp_data3 = pd.Categorical(list(vals1) + list(vals2) +
list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = pd.Index(vals1).append(pd.Index(vals2))
exp = pd.Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = pd.Index(vals1).append([pd.Index(vals2), pd.Index(vals3)])
exp = pd.Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = pd.Index(vals1, name='x')
i2 = pd.Index(vals2, name='y')
res = i1.append(i2)
exp = pd.Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = pd.Index(vals1, name='x')
i2 = pd.Index(vals2, name='x')
res = i1.append(i2)
exp = pd.Index(exp_data, name='x')
tm.assert_index_equal(res, exp)
# cannot append non-index
with tm.assert_raises_regex(TypeError,
'all inputs must be Index'):
pd.Index(vals1).append(vals2)
with tm.assert_raises_regex(TypeError,
'all inputs must be Index'):
pd.Index(vals1).append([pd.Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = pd.Series(vals1).append(pd.Series(vals2),
ignore_index=True)
exp = pd.Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([pd.Series(vals1), pd.Series(vals2)],
ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = pd.Series(vals1).append([pd.Series(vals2), pd.Series(vals3)],
ignore_index=True)
exp = pd.Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat([pd.Series(vals1), pd.Series(vals2),
pd.Series(vals3)], ignore_index=True)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = pd.Series(vals1, name='x')
s2 = pd.Series(vals2, name='y')
res = s1.append(s2, ignore_index=True)
exp = pd.Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = pd.Series(vals1, name='x')
s2 = pd.Series(vals2, name='x')
res = s1.append(s2, ignore_index=True)
exp = pd.Series(exp_data, name='x')
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = "cannot concatenate a non-NDFrame object"
with tm.assert_raises_regex(TypeError, msg):
pd.Series(vals1).append(vals2)
with tm.assert_raises_regex(TypeError, msg):
pd.Series(vals1).append([pd.Series(vals2), vals3])
with tm.assert_raises_regex(TypeError, msg):
pd.concat([pd.Series(vals1), vals2])
with tm.assert_raises_regex(TypeError, msg):
pd.concat([pd.Series(vals1), pd.Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self):
# GH 13660
for typ1, vals1 in iteritems(self.data):
for typ2, vals2 in iteritems(self.data):
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
continue
elif typ1 == 'category' or typ2 == 'category':
# ToDo: suspicious
continue
# specify expected dtype
if typ1 == 'bool' and typ2 in ('int64', 'float64'):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
elif typ2 == 'bool' and typ1 in ('int64', 'float64'):
exp_series_dtype = typ1
elif (typ1 == 'datetime64[ns, US/Eastern]' or
typ2 == 'datetime64[ns, US/Eastern]' or
typ1 == 'timedelta64[ns]' or
typ2 == 'timedelta64[ns]'):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = pd.Index(vals1).append(pd.Index(vals2))
exp = pd.Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = pd.Index(vals1).append([pd.Index(vals2),
pd.Index(vals3)])
exp = pd.Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series.append
res = pd.Series(vals1).append(pd.Series(vals2),
ignore_index=True)
exp = pd.Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([pd.Series(vals1), pd.Series(vals2)],
ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = pd.Series(vals1).append([pd.Series(vals2),
pd.Series(vals3)],
ignore_index=True)
exp = pd.Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
res = pd.concat([pd.Series(vals1), pd.Series(vals2),
pd.Series(vals3)], ignore_index=True)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(['2011-01-01', '2011-01-02'])
tdi = pd.TimedeltaIndex(['1 days', '2 days'])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = pd.Series(dti)
tds = pd.Series(tdi)
res = dts.append(tds)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self):
# GH 7795
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
dti1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
dti2 = pd.DatetimeIndex(['2012-01-01', '2012-01-02'], tz=tz)
exp = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2012-01-01', '2012-01-02'], tz=tz)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = pd.Series(dti1)
dts2 = pd.Series(dti2)
res = dts1.append(dts2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_datetimetz_short(self):
# GH 7795
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo', 'EST5EDT']:
ix1 = pd.DatetimeIndex(start='2014-07-15', end='2014-07-17',
freq='D', tz=tz)
ix2 = pd.DatetimeIndex(['2014-07-11', '2014-07-21'], tz=tz)
df1 = pd.DataFrame(0, index=ix1, columns=['A', 'B'])
df2 = pd.DataFrame(0, index=ix2, columns=['A', 'B'])
exp_idx = pd.DatetimeIndex(['2014-07-15', '2014-07-16',
'2014-07-17', '2014-07-11',
'2014-07-21'], tz=tz)
exp = pd.DataFrame(0, index=exp_idx, columns=['A', 'B'])
tm.assert_frame_equal(df1.append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self):
# GH 13660
# different tz coerces to object
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
dti1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
dti2 = pd.DatetimeIndex(['2012-01-01', '2012-01-02'])
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02')], dtype=object)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = pd.Series(dti1)
dts2 = pd.Series(dti2)
res = dts1.append(dts2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(['2012-01-01', '2012-01-02'],
tz='US/Pacific')
exp = pd.Index([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2012-01-01', tz='US/Pacific'),
pd.Timestamp('2012-01-02', tz='US/Pacific')],
dtype=object)
res = dti1.append(dti3)
# tm.assert_index_equal(res, exp)
dts1 = pd.Series(dti1)
dts3 = pd.Series(dti3)
res = dts1.append(dts3)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
pi2 = pd.PeriodIndex(['2012-01', '2012-02'], freq='M')
exp = pd.PeriodIndex(['2011-01', '2011-02', '2012-01',
'2012-02'], freq='M')
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
ps2 = pd.Series(pi2)
res = ps1.append(ps2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
pi2 = pd.PeriodIndex(['2012-01-01', '2012-02-01'], freq='D')
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Period('2012-01-01', freq='D'),
pd.Period('2012-02-01', freq='D')], dtype=object)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
ps2 = pd.Series(pi2)
res = ps1.append(ps2)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
tdi = pd.TimedeltaIndex(['1 days', '2 days'])
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Timedelta('1 days'),
pd.Timedelta('2 days')], dtype=object)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
tds = pd.Series(tdi)
res = ps1.append(tds)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = pd.Index([pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M')], dtype=object)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = pd.Series(pi1)
tds = pd.Series(tdi)
res = tds.append(ps1)
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, pd.Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = pd.Series([1, 2, np.nan], dtype='category')
s2 = pd.Series([2, 1, 2], dtype='category')
exp = pd.Series([1, 2, np.nan, 2, 1, 2], dtype='category')
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = pd.Series([3, 2], dtype='category')
s2 = pd.Series([2, 1], dtype='category')
exp = pd.Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
        # completely different categories (same dtype) => not-category
s1 = pd.Series([10, 11, np.nan], dtype='category')
s2 = pd.Series([np.nan, 1, 3, 2], dtype='category')
exp = pd.Series([10, 11, np.nan, np.nan, 1, 3, 2])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
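    # Informal summary of the rule exercised above, as understood from the
    # expectations in this test rather than from the docs: concatenation keeps
    # the category dtype only when both inputs share the same category set; any
    # mismatch falls back to the plain dtype of the underlying values.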
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = pd.Series([1, 2, np.nan], dtype='category')
s2 = pd.Series([2, 1, 2])
exp = pd.Series([1, 2, np.nan, 2, 1, 2])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = pd.Series([2, 1, 2, 1, 2, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = pd.Series([3, 2], dtype='category')
s2 = pd.Series([2, 1])
exp = pd.Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
        # completely different categories => not-category
s1 = pd.Series([10, 11, np.nan], dtype='category')
s2 = pd.Series([1, 3, 2])
exp = pd.Series([10, 11, np.nan, 1, 3, 2])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series([1, 3, 2, 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# different dtype => not-category
s1 = pd.Series([10, 11, np.nan], dtype='category')
s2 = pd.Series(['a', 'b', 'c'])
exp = pd.Series([10, 11, np.nan, 'a', 'b', 'c'])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series(['a', 'b', 'c', 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
s1 = pd.Series([10, 11], dtype='category')
s2 = pd.Series([np.nan, np.nan, np.nan])
exp = pd.Series([10, 11, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series([np.nan, np.nan, np.nan, 10, 11])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
def test_concat_categorical_3elem_coercion(self):
# GH 13524
# mixed dtypes => not-category
s1 = pd.Series([1, 2, np.nan], dtype='category')
s2 = pd.Series([2, 1, 2], dtype='category')
s3 = pd.Series([1, 2, 1, 2, np.nan])
exp = pd.Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = pd.Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = pd.Series([4, 5, 6], dtype='category')
s2 = pd.Series([1, 2, 3], dtype='category')
s3 = pd.Series([1, 3, 4])
exp = pd.Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = pd.Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = pd.Series([4, 5, 6], dtype='category')
s2 = pd.Series([1, 2, 3], dtype='category')
s3 = pd.Series([10, 11, 12])
exp = pd.Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s3], ignore_index=True), exp)
exp = pd.Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3.append([s1, s2], ignore_index=True), exp)
def test_concat_categorical_multi_coercion(self):
# GH 13524
s1 = pd.Series([1, 3], dtype='category')
s2 = pd.Series([3, 4], dtype='category')
s3 = pd.Series([2, 3])
s4 = pd.Series([2, 2], dtype='category')
s5 = pd.Series([1, np.nan])
s6 = pd.Series([1, 3, 2], dtype='category')
# mixed dtype, values are all in categories => not-category
exp = pd.Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s1.append([s2, s3, s4, s5, s6], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = pd.Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
res = s6.append([s5, s4, s3, s2, s1], ignore_index=True)
tm.assert_series_equal(res, exp)
def test_concat_categorical_ordered(self):
# GH 13524
s1 = pd.Series(pd.Categorical([1, 2, np.nan], ordered=True))
s2 = pd.Series(pd.Categorical([2, 1, 2], ordered=True))
exp = pd.Series(pd.Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
exp = pd.Series(pd.Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan],
ordered=True))
tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s1.append([s2, s1], ignore_index=True), exp)
def test_concat_categorical_coercion_nan(self):
# GH 13524
# some edge cases
# category + not-category => not category
s1 = pd.Series(np.array([np.nan, np.nan], dtype=np.float64),
dtype='category')
s2 = pd.Series([np.nan, 1])
exp = pd.Series([np.nan, np.nan, np.nan, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
s1 = pd.Series([1, np.nan], dtype='category')
s2 = pd.Series([np.nan, np.nan])
exp = pd.Series([1, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
# mixed dtype, all nan-likes => not-category
s1 = pd.Series([np.nan, np.nan], dtype='category')
s2 = pd.Series([np.nan, np.nan])
exp = pd.Series([np.nan, np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
# all category nan-likes => category
s1 = pd.Series([np.nan, np.nan], dtype='category')
s2 = pd.Series([np.nan, np.nan], dtype='category')
exp = pd.Series([np.nan, np.nan, np.nan, np.nan], dtype='category')
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
def test_concat_categorical_empty(self):
# GH 13524
s1 = pd.Series([], dtype='category')
s2 = pd.Series([1, 2], dtype='category')
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
s1 = pd.Series([], dtype='category')
s2 = pd.Series([], dtype='category')
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
s1 = pd.Series([], dtype='category')
s2 = pd.Series([])
# different dtype => not-category
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
tm.assert_series_equal(s1.append(s2, ignore_index=True), s2)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
tm.assert_series_equal(s2.append(s1, ignore_index=True), s2)
s1 = pd.Series([], dtype='category')
s2 = pd.Series([np.nan, np.nan])
# empty Series is ignored
exp = pd.Series([np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1.append(s2, ignore_index=True), exp)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2.append(s1, ignore_index=True), exp)
class TestAppend(ConcatenateBase):
def test_append(self):
begin_index = self.frame.index[:5]
end_index = self.frame.index[5:]
begin_frame = self.frame.reindex(begin_index)
end_frame = self.frame.reindex(end_index)
appended = begin_frame.append(end_frame)
tm.assert_almost_equal(appended['A'], self.frame['A'])
del end_frame['A']
partial_appended = begin_frame.append(end_frame)
assert 'A' in partial_appended
partial_appended = end_frame.append(begin_frame)
assert 'A' in partial_appended
# mixed type handling
appended = self.mixed_frame[:5].append(self.mixed_frame[5:])
tm.assert_frame_equal(appended, self.mixed_frame)
# what to test here
mixed_appended = self.mixed_frame[:5].append(self.frame[5:])
mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:])
# all equal except 'foo' column
tm.assert_frame_equal(
mixed_appended.reindex(columns=['A', 'B', 'C', 'D']),
mixed_appended2.reindex(columns=['A', 'B', 'C', 'D']))
# append empty
empty = DataFrame({})
appended = self.frame.append(empty)
tm.assert_frame_equal(self.frame, appended)
assert appended is not self.frame
appended = empty.append(self.frame)
tm.assert_frame_equal(self.frame, appended)
assert appended is not self.frame
# Overlap
with pytest.raises(ValueError):
self.frame.append(self.frame, verify_integrity=True)
# see gh-6129: new columns
df = DataFrame({'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}})
row = Series([5, 6, 7], index=['a', 'b', 'c'], name='z')
expected = DataFrame({'a': {'x': 1, 'y': 2, 'z': 5}, 'b': {
'x': 3, 'y': 4, 'z': 6}, 'c': {'z': 7}})
result = df.append(row)
tm.assert_frame_equal(result, expected)
def test_append_length0_frame(self):
df = DataFrame(columns=['A', 'B', 'C'])
df3 = DataFrame(index=[0, 1], columns=['A', 'B'])
df5 = df.append(df3)
expected = DataFrame(index=[0, 1], columns=['A', 'B', 'C'])
assert_frame_equal(df5, expected)
def test_append_records(self):
arr1 = np.zeros((2,), dtype=('i4,f4,a10'))
arr1[:] = [(1, 2., 'Hello'), (2, 3., "World")]
arr2 = np.zeros((3,), dtype=('i4,f4,a10'))
arr2[:] = [(3, 4., 'foo'),
(5, 6., "bar"),
(7., 8., 'baz')]
df1 = DataFrame(arr1)
df2 = DataFrame(arr2)
result = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate((arr1, arr2)))
assert_frame_equal(result, expected)
def test_append_different_columns(self):
df = DataFrame({'bools': np.random.randn(10) > 0,
'ints': np.random.randint(0, 10, 10),
'floats': np.random.randn(10),
'strings': ['foo', 'bar'] * 5})
a = df[:5].loc[:, ['bools', 'ints', 'floats']]
b = df[5:].loc[:, ['strings', 'ints', 'floats']]
appended = a.append(b)
assert isna(appended['strings'][0:4]).all()
assert isna(appended['bools'][5:]).all()
def test_append_many(self):
chunks = [self.frame[:5], self.frame[5:10],
self.frame[10:15], self.frame[15:]]
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result, self.frame)
chunks[-1] = chunks[-1].copy()
chunks[-1]['foo'] = 'bar'
result = chunks[0].append(chunks[1:])
tm.assert_frame_equal(result.loc[:, self.frame.columns], self.frame)
assert (result['foo'][15:] == 'bar').all()
assert result['foo'][:15].isna().all()
def test_append_preserve_index_name(self):
# #980
df1 = DataFrame(data=None, columns=['A', 'B', 'C'])
df1 = df1.set_index(['A'])
df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]],
columns=['A', 'B', 'C'])
df2 = df2.set_index(['A'])
result = df1.append(df2)
assert result.index.name == 'A'
def test_append_dtype_coerce(self):
# GH 4993
# appending with datetime will incorrectly convert datetime64
import datetime as dt
from pandas import NaT
df1 = DataFrame(index=[1, 2], data=[dt.datetime(2013, 1, 1, 0, 0),
dt.datetime(2013, 1, 2, 0, 0)],
columns=['start_time'])
df2 = DataFrame(index=[4, 5], data=[[dt.datetime(2013, 1, 3, 0, 0),
dt.datetime(2013, 1, 3, 6, 10)],
[dt.datetime(2013, 1, 4, 0, 0),
dt.datetime(2013, 1, 4, 7, 10)]],
columns=['start_time', 'end_time'])
expected = concat([Series([NaT, NaT, dt.datetime(2013, 1, 3, 6, 10),
dt.datetime(2013, 1, 4, 7, 10)],
name='end_time'),
Series([dt.datetime(2013, 1, 1, 0, 0),
dt.datetime(2013, 1, 2, 0, 0),
dt.datetime(2013, 1, 3, 0, 0),
dt.datetime(2013, 1, 4, 0, 0)],
name='start_time')], axis=1)
result = df1.append(df2, ignore_index=True)
assert_frame_equal(result, expected)
def test_append_missing_column_proper_upcast(self):
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8')})
df2 = DataFrame({'B': np.array([True, False, True, False],
dtype=bool)})
appended = df1.append(df2, ignore_index=True)
assert appended['A'].dtype == 'f8'
assert appended['B'].dtype == 'O'
class TestConcatenate(ConcatenateBase):
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: 'foo'}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._data.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._data.blocks:
if b.is_float:
assert b.values.base is df._data.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._data.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._data.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._data.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 0, 1, 2, 3]])
expected = DataFrame(np.r_[df.values, df2.values],
index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values],
index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values],
columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values],
columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ['three', 'two', 'one', 'zero']
result = concat(pieces, axis=1, keys=['one', 'two', 'three'],
levels=[level],
names=['group_key'])
tm.assert_index_equal(result.columns.levels[0],
Index(level, name='group_key'))
assert result.columns.names[0] == 'group_key'
def test_concat_dataframe_keys_bug(self):
t1 = DataFrame({
'value': Series([1, 2, 3], index=Index(['a', 'b', 'c'],
name='id'))})
t2 = DataFrame({
'value': Series([7, 8], index=Index(['a', 'b'], name='id'))})
# it works
result = concat([t1, t2], axis=1, keys=['t1', 't2'])
assert list(result.columns) == [('t1', 'value'), ('t2', 'value')]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name='foo')
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame({'foo': [1, 2], 0: [1, 2], 1: [
4, 5]}, columns=['foo', 0, 1])
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=[
'red', 'blue', 'yellow'])
expected = DataFrame({'red': [1, 2], 'blue': [1, 2], 'yellow': [
4, 5]}, columns=['red', 'blue', 'yellow'])
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
def test_concat_dict(self):
frames = {'foo': DataFrame(np.random.randn(4, 3)),
'bar': DataFrame(np.random.randn(4, 3)),
'baz': DataFrame(np.random.randn(4, 3)),
'qux': DataFrame(np.random.randn(4, 3))}
sorted_keys = sorted(frames)
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys,
axis=1)
tm.assert_frame_equal(result, expected)
keys = ['baz', 'foo', 'bar']
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self):
frame1 = DataFrame({"test1": ["a", "b", "c"],
"test2": [1, 2, 3],
"test3": [4.5, 3.2, 1.2]})
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True)
nan = np.nan
expected = DataFrame([[nan, nan, nan, 4.3],
['a', 1, 4.5, 5.2],
['b', 2, 3.2, 2.2],
['c', 3, 1.2, nan]],
index=Index(["q", "x", "y", "z"]))
tm.assert_frame_equal(v1, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
result = concat([frame, frame], keys=[0, 1], names=['iteration'])
assert result.index.names == ('iteration',) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_tz(self):
# GH 6606
df = DataFrame({'dt': [datetime(2014, 1, 1),
datetime(2014, 1, 2),
datetime(2014, 1, 3)],
'b': ['A', 'B', 'C'],
'c': [1, 2, 3], 'd': [4, 5, 6]})
df['dt'] = df['dt'].apply(lambda d: Timestamp(d, tz='US/Pacific'))
df = df.set_index(['dt', 'b'])
exp_idx1 = DatetimeIndex(['2014-01-01', '2014-01-02',
'2014-01-03'] * 2,
tz='US/Pacific', name='dt')
exp_idx2 = Index(['A', 'B', 'C'] * 2, name='b')
exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'c': [1, 2, 3] * 2, 'd': [4, 5, 6] * 2},
index=exp_idx, columns=['c', 'd'])
result = concat([df, df])
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)],
names=['level1', None])
df = pd.DataFrame({'col': range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=['level2'])
index = pd.MultiIndex.from_product([[1, 2], [1], range(5)],
names=['level2', 'level1', None])
expected = pd.DataFrame({'col': list(range(5)) * 2},
index=index, dtype=np.int32)
assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=['level2'])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples,
names=['level2', 'level1', None])
expected = pd.DataFrame({'col': no_name}, index=index,
dtype=np.int32)
assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [['foo', 'baz'], ['one', 'two']]
names = ['first', 'second']
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels,
names=names)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(levels=levels + [[0]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1],
[0, 0, 0, 0]],
names=names + [None])
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
levels=levels)
assert result.index.names == (None,) * 3
# no levels
result = concat([df, df2, df, df2],
keys=[('foo', 'one'), ('foo', 'two'),
('baz', 'one'), ('baz', 'two')],
names=['first', 'second'])
assert result.index.names == ('first', 'second') + (None,)
tm.assert_index_equal(result.index.levels[0],
Index(['baz', 'foo'], name='first'))
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
pytest.raises(ValueError, concat, [df, df],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
pytest.raises(ValueError, concat, [df, df2],
keys=['one', 'two'], levels=[['foo', 'bar', 'baz']])
def test_concat_rename_index(self):
a = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_a'))
b = DataFrame(np.random.rand(3, 3),
columns=list('ABC'),
index=Index(list('abc'), name='index_b'))
result = concat([a, b], keys=['key0', 'key1'],
names=['lvl0', 'lvl1'])
exp = concat([a, b], keys=['key0', 'key1'], names=['lvl0'])
names = list(exp.index.names)
names[1] = 'lvl1'
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ['A', 'B', 'C', 'D']
df1 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='f8'),
'B': np.array([1, 2, 3, 4], dtype='i8'),
'C': np.array([1, 2, 3, 4], dtype='f8'),
'D': np.array([1, 2, 3, 4], dtype='i8')},
columns=columns)
df2 = DataFrame({'A': np.array([1, 2, 3, 4], dtype='i8'),
'B': np.array([1, 2, 3, 4], dtype='f8'),
'C': np.array([1, 2, 3, 4], dtype='i8'),
'D': np.array([1, 2, 3, 4], dtype='f8')},
columns=columns)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(np.concatenate([df1.values, df2.values], axis=0),
columns=columns)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=['a'])
df2 = DataFrame(np.random.randn(1, 4), index=['b'])
result = concat(
[df, df2], keys=['one', 'two'], names=['first', 'second'])
assert result.index.names == ('first', 'second')
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(np.random.randint(0, 10, size=40).reshape(
10, 4), columns=['A', 'A', 'C', 'C'])
result = concat([df, df], axis=1)
assert_frame_equal(result.iloc[:, :4], df)
assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
assert_frame_equal(result.iloc[:10], df)
assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
result = concat([df, df], axis=1)
assert_frame_equal(result.iloc[:, :6], df)
assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
assert_frame_equal(result.iloc[:10], df)
assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
assert_frame_equal(result, expected)
def test_with_mixed_tuples(self):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({u'A': 'foo', (u'B', 1): 'bar'}, index=range(2))
df2 = DataFrame({u'B': 'foo', (u'B', 1): 'bar'}, index=range(2))
# it works
concat([df1, df2])
def test_handle_empty_objects(self):
df = DataFrame(np.random.randn(10, 4), columns=list('abcd'))
baz = df[:5].copy()
baz['foo'] = 'bar'
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0)
expected = df.loc[:, ['a', 'b', 'c', 'd', 'foo']]
expected['foo'] = expected['foo'].astype('O')
expected.loc[0:4, 'foo'] = 'bar'
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(dict(A=range(10000)), index=date_range(
'20130101', periods=10000, freq='s'))
empty = DataFrame()
result = concat([df, empty], axis=1)
assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
assert_frame_equal(result, df)
result = concat([df, empty])
assert_frame_equal(result, df)
result = concat([empty, df])
assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH 2385
# axis 1
index = date_range('01-Jan-2013', periods=10, freq='H')
arr = np.arange(10, dtype='int64')
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(np.repeat(arr, 2).reshape(-1, 2),
index=index, columns=[0, 0])
result = concat([df, df], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr, 2).reshape(-1, 2),
index=index, columns=[0, 1])
result = concat([s1, s2], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=[0, 1, 2])
result = concat([s1, s2, s1], axis=1)
assert_frame_equal(result, expected)
expected = DataFrame(np.repeat(arr, 5).reshape(-1, 5),
index=index, columns=[0, 0, 1, 2, 3])
result = concat([s1, df, s2, s2, s1], axis=1)
assert_frame_equal(result, expected)
# with names
s1.name = 'foo'
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=['foo', 0, 0])
result = concat([s1, df, s2], axis=1)
assert_frame_equal(result, expected)
s2.name = 'bar'
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=['foo', 0, 'bar'])
result = concat([s1, df, s2], axis=1)
assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(np.repeat(arr, 3).reshape(-1, 3),
index=index, columns=[0, 1, 2])
result = concat([s1, df, s2], axis=1, ignore_index=True)
assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1),
index=index.tolist() * 3, columns=[0])
result = concat([s1, df, s2])
assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
assert_frame_equal(result, expected)
        # invalid concatenate of mixed dims
with catch_warnings(record=True):
panel = tm.makePanel()
pytest.raises(ValueError, lambda: concat([panel, s1], axis=1))
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=['a', 'b'])
df2 = DataFrame(data=[[3, None], [4, None]], columns=['a', 'b'])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({'date': [pd.Timestamp('20130101').tz_localize('UTC'),
pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame({'date': [datetime.datetime(2012, 1, 1),
datetime.datetime(1012, 1, 2)]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({'text': ['some words'] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_panel_concat_other_axes(self):
with catch_warnings(record=True):
panel = tm.makePanel()
p1 = panel.iloc[:, :5, :]
p2 = panel.iloc[:, 5:, :]
result = concat([p1, p2], axis=1)
tm.assert_panel_equal(result, panel)
p1 = panel.iloc[:, :, :2]
p2 = panel.iloc[:, :, 2:]
result = concat([p1, p2], axis=2)
tm.assert_panel_equal(result, panel)
# if things are a bit misbehaved
p1 = panel.iloc[:2, :, :2]
p2 = panel.iloc[:, :, 2:]
p1['ItemC'] = 'baz'
result = concat([p1, p2], axis=2)
expected = panel.copy()
expected['ItemC'] = expected['ItemC'].astype('O')
expected.loc['ItemC', :, :2] = 'baz'
tm.assert_panel_equal(result, expected)
def test_panel_concat_buglet(self):
with catch_warnings(record=True):
# #2257
def make_panel():
index = 5
cols = 3
def df():
return DataFrame(np.random.randn(index, cols),
index=["I%s" % i for i in range(index)],
columns=["C%s" % i for i in range(cols)])
return Panel(dict([("Item%s" % x, df())
for x in ['A', 'B', 'C']]))
panel1 = make_panel()
panel2 = make_panel()
panel2 = panel2.rename_axis(dict([(x, "%s_1" % x)
for x in panel2.major_axis]),
axis=1)
panel3 = panel2.rename_axis(lambda x: '%s_1' % x, axis=1)
panel3 = panel3.rename_axis(lambda x: '%s_1' % x, axis=2)
# it works!
concat([panel1, panel3], axis=1, verify_integrity=True)
def test_panel4d_concat(self):
with catch_warnings(record=True):
p4d = tm.makePanel4D()
p1 = p4d.iloc[:, :, :5, :]
p2 = p4d.iloc[:, :, 5:, :]
result = concat([p1, p2], axis=2)
tm.assert_panel4d_equal(result, p4d)
p1 = p4d.iloc[:, :, :, :2]
p2 = p4d.iloc[:, :, :, 2:]
result = concat([p1, p2], axis=3)
tm.assert_panel4d_equal(result, p4d)
def test_panel4d_concat_mixed_type(self):
with catch_warnings(record=True):
p4d = tm.makePanel4D()
# if things are a bit misbehaved
p1 = p4d.iloc[:, :2, :, :2]
p2 = p4d.iloc[:, :, :, 2:]
p1['L5'] = 'baz'
result = concat([p1, p2], axis=3)
p2['L5'] = np.nan
expected = concat([p1, p2], axis=3)
expected = expected.loc[result.labels]
tm.assert_panel4d_equal(result, expected)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = 'foo'
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype='M8[ns]'))
exp_labels = [np.repeat([0, 1, 2], [len(x) for x in pieces]),
np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index],
labels=exp_labels)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
assert_frame_equal(result, expected)
result = concat(pieces, keys=['A', 'B', 'C'], axis=1)
expected = DataFrame(pieces, index=['A', 'B', 'C']).T
assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name='A')
s2 = Series(randn(5), name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns,
Index(['A', 0], dtype='object'))
# must reindex, #2603
s = Series(randn(3), index=['c', 'a', 'b'], name='A')
s2 = Series(randn(4), index=['d', 'a', 'b', 'c'], name='B')
result = concat([s, s2], axis=1)
expected = DataFrame({'A': s, 'B': s2})
assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=['foo'])
expected = concat([df, df], keys=['foo', 'bar'])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
pytest.raises(ValueError, concat, [None, None])
def test_concat_datetime64_block(self):
from pandas.core.indexes.datetimes import date_range
rng = date_range('1/1/2000', periods=10)
df = DataFrame({'time': rng})
result = concat([df, df])
assert (result.iloc[:10]['time'] == rng).all()
assert (result.iloc[10:]['time'] == rng).all()
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit='s')
df = DataFrame({'time': rng})
result = concat([df, df])
assert (result.iloc[:10]['time'] == rng).all()
assert (result.iloc[10:]['time'] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat([None, df0, df0[:2], df0[:1], df0],
keys=['a', 'b', 'c', 'd', 'e'])
expected = concat([df0, df0[:2], df0[:1], df0],
keys=['b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join='outer', axis=1)
right = concat([ts2, ts1], join='outer', axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = 'same name'
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ['same name', 'same name']
assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame({'firmNo': [0, 0, 0, 0], 'stringvar': [
'rrr', 'rrr', 'rrr', 'rrr'], 'prc': [6, 6, 6, 6]})
df2 = DataFrame({'misc': [1, 2, 3, 4], 'prc': [
6, 6, 6, 6], 'C': [9, 10, 11, 12]})
expected = DataFrame([[0, 6, 'rrr', 9, 1, 6],
[0, 6, 'rrr', 10, 2, 6],
[0, 6, 'rrr', 11, 3, 6],
[0, 6, 'rrr', 12, 4, 6]])
expected.columns = ['firmNo', 'prc', 'stringvar', 'C', 'misc', 'prc']
result = concat([df1, df2], axis=1)
assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = pd.DataFrame()
df_a = pd.DataFrame({'a': [1, 2]}, index=[0, 1], dtype='int64')
df_expected = pd.DataFrame({'a': []}, index=[], dtype='int64')
for how, expected in [('inner', df_expected), ('outer', df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range('01-Jan-2013', '01-Jan-2014', freq='MS')[0:-1]
s1 = Series(randn(len(dates)), index=dates, name='value')
s2 = Series(randn(len(dates)), index=dates, name='value')
result = concat([s1, s2], axis=1, ignore_index=True)
assert np.array_equal(result.columns, [0, 1])
def test_concat_iterables(self):
from collections import deque, Iterable
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
assert_frame_equal(concat((df for df in (df1, df2)),
ignore_index=True), expected)
assert_frame_equal(
concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1(object):
def __len__(self):
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError:
raise IndexError
assert_frame_equal(pd.concat(CustomIterator1(),
ignore_index=True), expected)
class CustomIterator2(Iterable):
def __iter__(self):
yield df1
yield df2
assert_frame_equal(pd.concat(CustomIterator2(),
ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = mkdf(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
            pytest.raises(TypeError, lambda: concat([df1, obj]))
def test_concat_invalid_first_argument(self):
df1 = mkdf(10, 2)
df2 = mkdf(10, 2)
pytest.raises(TypeError, concat, df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
assert_frame_equal(result, expected)
def test_concat_NaT_series(self):
# GH 11693
# test for merging NaT series with datetime series.
x = Series(date_range('20151124 08:00', '20151124 09:00',
freq='1h', tz='US/Eastern'))
y = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
expected = Series([x[0], x[1], pd.NaT, pd.NaT])
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# all NaT with tz
expected = Series(pd.NaT, index=range(4),
dtype='datetime64[ns, US/Eastern]')
result = pd.concat([y, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# without tz
x = pd.Series(pd.date_range('20151124 08:00',
'20151124 09:00', freq='1h'))
y = pd.Series(pd.date_range('20151124 10:00',
'20151124 11:00', freq='1h'))
y[:] = pd.NaT
expected = pd.Series([x[0], x[1], pd.NaT, pd.NaT])
result = pd.concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# all NaT without tz
x[:] = pd.NaT
expected = pd.Series(pd.NaT, index=range(4),
dtype='datetime64[ns]')
result = pd.concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
def test_concat_tz_frame(self):
df2 = DataFrame(dict(A=pd.Timestamp('20130102', tz='US/Eastern'),
B=pd.Timestamp('20130603', tz='CET')),
index=range(5))
# concat
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
assert_frame_equal(df2, df3)
def test_concat_tz_series(self):
# gh-11755: tz and no tz
x = Series(date_range('20151124 08:00',
'20151124 09:00',
freq='1h', tz='UTC'))
y = Series(date_range('2012-01-01', '2012-01-02'))
expected = Series([x[0], x[1], y[0], y[1]],
dtype='object')
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# gh-11887: concat tz and object
x = Series(date_range('20151124 08:00',
'20151124 09:00',
freq='1h', tz='UTC'))
y = Series(['a', 'b'])
expected = Series([x[0], x[1], y[0], y[1]],
dtype='object')
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
# see gh-12217 and gh-12306
# Concatenating two UTC times
first = pd.DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize('UTC')
second = pd.DataFrame([[datetime(2016, 1, 2)]])
second[0] = second[0].dt.tz_localize('UTC')
result = pd.concat([first, second])
assert result[0].dtype == 'datetime64[ns, UTC]'
# Concatenating two London times
first = pd.DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize('Europe/London')
second = pd.DataFrame([[datetime(2016, 1, 2)]])
second[0] = second[0].dt.tz_localize('Europe/London')
result = pd.concat([first, second])
assert result[0].dtype == 'datetime64[ns, Europe/London]'
# Concatenating 2+1 London times
first = pd.DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]])
first[0] = first[0].dt.tz_localize('Europe/London')
second = pd.DataFrame([[datetime(2016, 1, 3)]])
second[0] = second[0].dt.tz_localize('Europe/London')
result = pd.concat([first, second])
assert result[0].dtype == 'datetime64[ns, Europe/London]'
# Concat'ing 1+2 London times
first = pd.DataFrame([[datetime(2016, 1, 1)]])
first[0] = first[0].dt.tz_localize('Europe/London')
second = pd.DataFrame([[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]])
second[0] = second[0].dt.tz_localize('Europe/London')
result = pd.concat([first, second])
assert result[0].dtype == 'datetime64[ns, Europe/London]'
def test_concat_tz_series_with_datetimelike(self):
# see gh-12620: tz and timedelta
x = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-02-01', tz='US/Eastern')]
y = [pd.Timedelta('1 day'), pd.Timedelta('2 day')]
result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
tm.assert_series_equal(result, pd.Series(x + y, dtype='object'))
# tz and period
y = [pd.Period('2011-03', freq='M'), pd.Period('2011-04', freq='M')]
result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
tm.assert_series_equal(result, pd.Series(x + y, dtype='object'))
def test_concat_tz_series_tzlocal(self):
# see gh-13583
x = [pd.Timestamp('2011-01-01', tz=dateutil.tz.tzlocal()),
pd.Timestamp('2011-02-01', tz=dateutil.tz.tzlocal())]
y = [pd.Timestamp('2012-01-01', tz=dateutil.tz.tzlocal()),
pd.Timestamp('2012-02-01', tz=dateutil.tz.tzlocal())]
result = concat([pd.Series(x), pd.Series(y)], ignore_index=True)
tm.assert_series_equal(result, pd.Series(x + y))
assert result.dtype == 'datetime64[ns, tzlocal()]'
def test_concat_period_series(self):
x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
y = Series(pd.PeriodIndex(['2015-10-01', '2016-01-01'], freq='D'))
expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == 'object'
# different freq
x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
y = Series(pd.PeriodIndex(['2015-10-01', '2016-01-01'], freq='M'))
expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == 'object'
x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
y = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='M'))
expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == 'object'
# non-period
x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
y = Series(pd.DatetimeIndex(['2015-11-01', '2015-12-01']))
expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == 'object'
x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D'))
y = Series(['A', 'B'])
expected = Series([x[0], x[1], y[0], y[1]], dtype='object')
result = concat([x, y], ignore_index=True)
tm.assert_series_equal(result, expected)
assert result.dtype == 'object'
def test_concat_empty_series(self):
# GH 11082
s1 = pd.Series([1, 2, 3], name='x')
s2 = pd.Series(name='y')
res = pd.concat([s1, s2], axis=1)
exp = pd.DataFrame({'x': [1, 2, 3], 'y': [np.nan, np.nan, np.nan]})
tm.assert_frame_equal(res, exp)
s1 = pd.Series([1, 2, 3], name='x')
s2 = pd.Series(name='y')
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = pd.Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = pd.Series([1, 2, 3], name='x')
s2 = pd.Series(name=None)
res = pd.concat([s1, s2], axis=1)
exp = pd.DataFrame({'x': [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=['x', 0])
tm.assert_frame_equal(res, exp)
def test_default_index(self):
# is_series and ignore_index
s1 = pd.Series([1, 2, 3], name='x')
s2 = pd.Series([4, 5, 6], name='y')
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])
        # use check_index_type=True to check that the result has a
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True,
check_column_type=True)
# is_series and all inputs have no names
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = pd.DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True,
check_column_type=True)
# is_dataframe and ignore_index
df1 = pd.DataFrame({'A': [1, 2], 'B': [5, 6]})
df2 = pd.DataFrame({'A': [3, 4], 'B': [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = pd.DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]],
columns=['A', 'B'])
tm.assert_frame_equal(res, exp, check_index_type=True,
check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = pd.DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True,
check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
labels=[np.repeat(np.arange(3), 3),
np.tile(np.arange(3), 3)])
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([['a'], ['b']])
example_dataframe1 = pd.DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([['a'], ['c']])
example_dataframe2 = pd.DataFrame([1], index=example_multiindex2)
example_dict = {'s1': example_dataframe1, 's2': example_dataframe2}
expected_index = pd.MultiIndex(levels=[['s1', 's2'],
['a'],
['b', 'c']],
labels=[[0, 1], [0, 0], [0, 1]],
names=['testname', None, None])
expected = pd.DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=['testname'])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=['testname'])
tm.assert_frame_equal(result_no_copy, expected)
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = pd.Series(1, index=pd.CategoricalIndex([9, 0],
categories=categories))
b = pd.Series(2, index=pd.CategoricalIndex([0, 1],
categories=categories))
c = pd.Series(3, index=pd.CategoricalIndex([1, 2],
categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([0, 1, 2, 9])
exp = pd.DataFrame({0: [1, np.nan, np.nan, 1],
1: [2, 2, np.nan, np.nan],
2: [np.nan, 3, 3, np.nan]},
columns=[0, 1, 2],
index=exp_idx)
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel])
@pytest.mark.parametrize('dt', np.sctypes['float'])
def test_concat_no_unnecessary_upcast(dt, pdt):
with catch_warnings(record=True):
# GH 13247
dims = pdt().ndim
dfs = [pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims))]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel])
@pytest.mark.parametrize('dt', np.sctypes['int'])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims))]
x = pd.concat(dfs)
assert x.values.dtype == 'float64'
| bsd-3-clause |
konstantinstadler/pymrio | doc/source/notebooks/stressor_characterization.py | 1 | 5412 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Characterization of stressors
# The characterization of stressors is a standard procedure for calculating the environmental and social impacts of economic activity. This is usually accomplished by multiplying (matrix multiplication) the characterization matrix with the stressor matrix. Doing that in matrix form requires a 1:1 correspondence between the columns of the characterization matrix and the rows of the stressor matrix.
# Pymrio uses a different approach: it matches the strings of the
# characterization table (given in long format) to the available stressors. By
# doing that, the order of the entries in the characterization table becomes
# unimportant.
# This implementation also makes it possible to use characterization tables
# which include characterizations for stressors not present in the given
# satellite account. All characterizations relying on stressors that are not
# available will be removed automatically.
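# As a minimal sketch of this conventional matrix approach (plain NumPy with
# made-up numbers, not the pymrio API), the characterized accounts follow from
# D = C @ S, where C holds one row per impact and one column per stressor and
# S holds one row per stressor and one column per region/sector. The stressor
# ordering must be identical in C and S:
import numpy as np

C_sketch = np.array([[1.0, 25.0],   # factors of impact 1 for stressor 1 and 2
                     [0.0, 0.5]])   # factors of impact 2 for stressor 1 and 2
S_sketch = np.array([[10.0, 20.0],  # stressor 1 per sector
                     [1.0, 2.0]])   # stressor 2 per sector
D_sketch = C_sketch @ S_sketch      # one row per impact, one column per sector
D_sketch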
# ## Example
# For this example we use the test MRIO included in Pymrio. We also need
# the Pandas library for loading the characterization table and pathlib for some folder manipulation.
from pathlib import Path
import pandas as pd
import pymrio
from pymrio.core.constants import PYMRIO_PATH # noqa
# To load the test MRIO we use:
io = pymrio.load_test()
# and the characterization table with some foo factors can be loaded by
charact_table = pd.read_csv(
(PYMRIO_PATH["test_mrio"] / Path("concordance") / "emissions_charact.tsv"),
sep="\t",
)
charact_table
# This table contains the columns 'stressor' and 'compartment' which correspond
# to the index names of the test_mrio emission satellite accounts:
io.emissions.F
# These index names / column names need to match in order to assign
# characterization factors to the stressors.
# The other column names can be passed to the characterization method. By default the method assumes the following column names:
#
# - impact: name of the characterization/impact
# - factor: the numerical (float) multiplication value for a specific stressor to derive the impact/characterized account
# - impact_unit: the unit of the calculated characterization/impact
#
# Alternative names can be passed through the parameters
# *characterized_name_column*, *characterization_factors_column* and *characterized_unit_column*.
#
# Note that stressor units are currently not checked - the units as given in
# the satellite account to be characterized are assumed. These can be seen by:
io.emissions.unit
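# As a sketch only (impact names, factors and units below are made up and
# merely illustrate the default long-format column layout; the calculations in
# this notebook use the shipped emissions_charact.tsv loaded above, and the
# stressor/compartment entries are assumed to match the test MRIO shown in
# io.emissions.F), such a table could also be built by hand:
charact_sketch = pd.DataFrame(
    {
        "stressor": ["emission_type1", "emission_type1", "emission_type2"],
        "compartment": ["air", "air", "water"],
        "impact": ["example impact A", "example impact B", "example impact B"],
        "factor": [2.0, 1.0, 1.5],
        "impact_unit": ["kg_A_eq", "kg_B_eq", "kg_B_eq"],
    }
)
charact_sketch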
# Also note that the charact_table contains a characterization called 'total
# emissions', whose calculation requires a stressor not present in the
# satellite account. This will be omitted automatically.
# To calculate the characterization we use
impacts = io.emissions.characterize(charact_table, name="impacts")
# The parameter *name* is optional; if omitted, the name will be set to
# extension_name + _characterized
# The method call above results in a pymrio.Extension which can be inspected with the usual
# methods, e.g.:
impacts.F
impacts.F_Y
# and the extension can be added to the MRIO
io.impacts = impacts
# and used for subsequent calculations:
io.calc_all()
io.impacts.D_cba
# ### Characterizing calculated results
# The characterize method can also be used to characterize already calculated
# results. This works in the same way:
io_aly = pymrio.load_test().calc_all()
io_aly.emissions.D_cba
io_aly.impacts = io_aly.emissions.characterize(charact_table, name="impacts_new")
# Note that all results which can be characterized directly (all flow accounts
# like D_cba, D_pba, ...) are automatically included:
io_aly.impacts.D_cba
# Whereas coefficient accounts (M, S) are removed:
io_aly.impacts.M
# To calculate these, use
io_aly.calc_all()
io_aly.impacts.M
# which will calculate the missing accounts.
# For these calculations, the characterized accounts can also be used outside
# the MRIO system. Thus:
independent_extension = io_aly.emissions.characterize(charact_table, name="impacts_new")
type(independent_extension)
independent_extension.M
independent_extension_calc = independent_extension.calc_system(x=io_aly.x, Y=io_aly.Y)
independent_extension.M
# ## Inspecting the used characterization table
# Pymrio automatically adjusts the characterization table by removing accounts
# which cannot be calculated using a given extension. The removed accounts are
# reported through a warning message (e.g. "WARNING:root:Impact >total
# emissions< removed - calculation requires stressors not present in extension
# >Emissions<" in the examples above).
#
# It is also possible to obtain the cleaned characterization table for
# inspection and further use. To do so:
impacts = io.emissions.characterize(
charact_table, name="impacts", return_char_matrix=True
)
# This changes the return type from a pymrio.Extension to a named tuple
type(impacts)
# with
impacts.extension
# and
impacts.factors
# The latter is the characterization table used for the calculation.
#
# For further information see the characterization docstring:
print(io.emissions.characterize.__doc__)
| gpl-3.0 |
anurag313/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    # check if min and max values are used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/neighbors/unsupervised.py | 22 | 4751 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
        Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/coreg.py | 10 | 38824 | """Coregistration between different coordinate frames"""
# Authors: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
from .externals.six.moves import configparser
import fnmatch
from glob import glob, iglob
import os
import stat
import sys
import re
import shutil
from warnings import warn
import numpy as np
from numpy import dot
from .io.meas_info import read_fiducials, write_fiducials
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces,
write_source_spaces)
from .surface import read_surface, write_surface
from .bem import read_bem_surfaces, write_bem_surfaces
from .transforms import rotation, rotation3d, scaling, translation
from .utils import get_config, get_subjects_dir, logger, pformat
from functools import reduce
from .externals.six.moves import zip
# some path templates
trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
subject_dirname = os.path.join('{subjects_dir}', '{subject}')
bem_dirname = os.path.join(subject_dirname, 'bem')
surf_dirname = os.path.join(subject_dirname, 'surf')
bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
head_bem_fname = pformat(bem_fname, name='head')
fid_fname = pformat(bem_fname, name='fiducials')
fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
def _make_writable(fname):
os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write
def _make_writable_recursive(path):
"""Recursively set writable"""
if sys.platform.startswith('win'):
return # can't safely set perms
for root, dirs, files in os.walk(path, topdown=False):
for f in dirs + files:
_make_writable(os.path.join(root, f))
def create_default_subject(mne_root=None, fs_home=None, update=False,
subjects_dir=None):
"""Create an average brain subject for subjects without structural MRI
Create a copy of fsaverage from the Freesurfer directory in subjects_dir
and add auxiliary files from the mne package.
Parameters
----------
mne_root : None | str
The mne root directory (only needed if MNE_ROOT is not specified as
environment variable).
fs_home : None | str
The freesurfer home directory (only needed if FREESURFER_HOME is not
specified as environment variable).
update : bool
In cases where a copy of the fsaverage brain already exists in the
subjects_dir, this option allows to only copy files that don't already
exist in the fsaverage directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(os.environ['SUBJECTS_DIR']) as destination for the new subject.
Notes
-----
When no structural MRI is available for a subject, an average brain can be
substituted. Freesurfer comes with such an average brain model, and MNE
comes with some auxiliary files which make coregistration easier.
:py:func:`create_default_subject` copies the relevant files from Freesurfer
into the current subjects_dir, and also adds the auxiliary files provided
by MNE.
The files provided by MNE are listed below and can be found under
``share/mne/mne_analyze/fsaverage`` in the MNE directory (see MNE manual
section 7.19 Working with the average brain):
    fsaverage-head.fif:
        The approximate head surface triangulation for fsaverage.
    fsaverage-inner_skull-bem.fif:
The approximate inner skull surface for fsaverage.
fsaverage-fiducials.fif:
The locations of the fiducial points (LPA, RPA, and nasion).
fsaverage-trans.fif:
Contains a default MEG-MRI coordinate transformation suitable for
fsaverage.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if fs_home is None:
fs_home = get_config('FREESURFER_HOME', fs_home)
if fs_home is None:
raise ValueError(
"FREESURFER_HOME environment variable not found. Please "
"specify the fs_home parameter in your call to "
"create_default_subject().")
if mne_root is None:
mne_root = get_config('MNE_ROOT', mne_root)
if mne_root is None:
raise ValueError("MNE_ROOT environment variable not found. Please "
"specify the mne_root parameter in your call to "
"create_default_subject().")
# make sure freesurfer files exist
fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
if not os.path.exists(fs_src):
raise IOError('fsaverage not found at %r. Is fs_home specified '
'correctly?' % fs_src)
for name in ('label', 'mri', 'surf'):
dirname = os.path.join(fs_src, name)
if not os.path.isdir(dirname):
raise IOError("Freesurfer fsaverage seems to be incomplete: No "
"directory named %s found in %s" % (name, fs_src))
# make sure destination does not already exist
dest = os.path.join(subjects_dir, 'fsaverage')
if dest == fs_src:
raise IOError(
"Your subjects_dir points to the freesurfer subjects_dir (%r). "
"The default subject can not be created in the freesurfer "
"installation directory; please specify a different "
"subjects_dir." % subjects_dir)
elif (not update) and os.path.exists(dest):
raise IOError(
"Can not create fsaverage because %r already exists in "
"subjects_dir %r. Delete or rename the existing fsaverage "
"subject folder." % ('fsaverage', subjects_dir))
# make sure mne files exist
mne_fname = os.path.join(mne_root, 'share', 'mne', 'mne_analyze',
'fsaverage', 'fsaverage-%s.fif')
mne_files = ('fiducials', 'head', 'inner_skull-bem', 'trans')
for name in mne_files:
fname = mne_fname % name
if not os.path.isfile(fname):
raise IOError("MNE fsaverage incomplete: %s file not found at "
"%s" % (name, fname))
# copy fsaverage from freesurfer
logger.info("Copying fsaverage subject from freesurfer directory...")
if (not update) or not os.path.exists(dest):
shutil.copytree(fs_src, dest)
_make_writable_recursive(dest)
# add files from mne
dest_bem = os.path.join(dest, 'bem')
if not os.path.exists(dest_bem):
os.mkdir(dest_bem)
logger.info("Copying auxiliary fsaverage files from mne directory...")
dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
_make_writable_recursive(dest_bem)
for name in mne_files:
if not os.path.exists(dest_fname % name):
shutil.copy(mne_fname % name, dest_bem)
def _decimate_points(pts, res=10):
"""Decimate the number of points using a voxel grid
Create a voxel grid with a specified resolution and retain at most one
point per voxel. For each voxel, the point closest to its center is
retained.
Parameters
----------
pts : array, shape (n_points, 3)
The points making up the head shape.
res : scalar
The resolution of the voxel space (side length of each voxel).
Returns
-------
pts : array, shape = (n_points, 3)
The decimated points.
"""
from scipy.spatial.distance import cdist
pts = np.asarray(pts)
# find the bin edges for the voxel space
xmin, ymin, zmin = pts.min(0) - res / 2.
xmax, ymax, zmax = pts.max(0) + res
xax = np.arange(xmin, xmax, res)
yax = np.arange(ymin, ymax, res)
zax = np.arange(zmin, zmax, res)
# find voxels containing one or more point
H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
# for each voxel, select one point
X, Y, Z = pts.T
out = np.empty((np.sum(H > 0), 3))
for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):
x = xax[xbin]
y = yax[ybin]
z = zax[zbin]
xi = np.logical_and(X >= x, X < x + res)
yi = np.logical_and(Y >= y, Y < y + res)
zi = np.logical_and(Z >= z, Z < z + res)
idx = np.logical_and(zi, np.logical_and(yi, xi))
ipts = pts[idx]
mid = np.array([x, y, z]) + res / 2.
dist = cdist(ipts, [mid])
i_min = np.argmin(dist)
ipt = ipts[i_min]
out[i] = ipt
return out
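# Illustrative use of _decimate_points (comment only, not part of the original
# module): thin a dense head-shape point cloud so that at most one point per
# 5-unit voxel is kept, assuming ``dense_pts`` is an (n, 3) array given in the
# same unit as ``res``:
#
#     sparse_pts = _decimate_points(dense_pts, res=5)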
def _trans_from_params(param_info, params):
"""Convert transformation parameters into a transformation matrix
Parameters
----------
param_info : tuple, len = 3
Tuple describing the parameters in x (do_translate, do_rotate,
do_scale).
params : tuple
The transformation parameters.
Returns
-------
trans : array, shape = (4, 4)
Transformation matrix.
"""
do_rotate, do_translate, do_scale = param_info
i = 0
trans = []
if do_rotate:
x, y, z = params[:3]
trans.append(rotation(x, y, z))
i += 3
if do_translate:
x, y, z = params[i:i + 3]
trans.insert(0, translation(x, y, z))
i += 3
if do_scale == 1:
s = params[i]
trans.append(scaling(s, s, s))
elif do_scale == 3:
x, y, z = params[i:i + 3]
trans.append(scaling(x, y, z))
trans = reduce(dot, trans)
return trans
def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
scale=False, tol=None, x0=None, out='trans'):
"""Find a transform that minimizes the squared distance between two
matching sets of points.
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (n, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : bool
Number of scaling parameters. With False, points are not scaled. With
True, points are scaled by the same factor along all axes.
tol : scalar | None
The error tolerance. If the distance between any of the matched points
exceeds this value in the solution, a RuntimeError is raised. With
None, no error check is performed.
x0 : None | tuple
Initial values for the fit parameters.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
One of the following, depending on the ``out`` parameter:
trans : array, shape = (4, 4)
Transformation that, if applied to src_pts, minimizes the squared
distance to tgt_pts.
params : array, shape = (n_params, )
A single tuple containing the translation, rotation and scaling
parameters in that order.
"""
from scipy.optimize import leastsq
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
if src_pts.shape != tgt_pts.shape:
raise ValueError("src_pts and tgt_pts must have same shape (got "
"{0}, {1})".format(src_pts.shape, tgt_pts.shape))
rotate = bool(rotate)
translate = bool(translate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0)
elif param_info == (True, False, 1):
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0, 1)
elif param_info == (True, True, 0):
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0)
elif param_info == (True, True, 1):
def error(x):
rx, ry, rz, tx, ty, tz, s = x
trans = reduce(dot, (translation(tx, ty, tz), rotation(rx, ry, rz),
scaling(s, s, s)))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0, 1)
else:
raise NotImplementedError(
"The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
x, _, _, _, _ = leastsq(error, x0, full_output=True)
# re-create the final transformation matrix
if (tol is not None) or (out == 'trans'):
trans = _trans_from_params(param_info, x)
# assess the error of the solution
if tol is not None:
if not translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
est_pts = dot(src_pts, trans.T)[:, :3]
err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
if np.any(err > tol):
raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
if out == 'params':
return x
elif out == 'trans':
return trans
else:
raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
def _point_cloud_error(src_pts, tgt_pts):
"""Find the distance from each source point to its closest target point
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_pts : array, shape = (m, 3)
Target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
from scipy.spatial.distance import cdist
Y = cdist(src_pts, tgt_pts, 'euclidean')
dist = Y.min(axis=1)
return dist
def _point_cloud_error_balltree(src_pts, tgt_tree):
"""Find the distance from each source point to its closest target point
Uses sklearn.neighbors.BallTree for greater efficiency
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_tree : sklearn.neighbors.BallTree
BallTree of the target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
dist, _ = tgt_tree.query(src_pts)
return dist.ravel()
def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
scale=0, x0=None, leastsq_args={}, out='params'):
"""Find a transform that minimizes the squared distance from each source
point to its closest target point
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (m, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : 0 | 1 | 3
Number of scaling parameters. With 0, points are not scaled. With 1,
points are scaled by the same factor along all axes. With 3, points are
scaled by a separate factor along each axis.
x0 : None | tuple
Initial values for the fit parameters.
leastsq_args : dict
Additional parameters to submit to :func:`scipy.optimize.leastsq`.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
x : array, shape = (n_params, )
Estimated parameters for the transformation.
Notes
-----
Assumes that the target points form a dense enough point cloud so that
the distance of each src_pt to the closest tgt_pt can be used as an
estimate of the distance of src_pt to tgt_pts.
"""
from scipy.optimize import leastsq
kwargs = {'epsfcn': 0.01}
kwargs.update(leastsq_args)
# assert correct argument types
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
translate = bool(translate)
rotate = bool(rotate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
try:
from sklearn.neighbors import BallTree
tgt_pts = BallTree(tgt_pts)
errfunc = _point_cloud_error_balltree
except ImportError:
warn("Sklearn could not be imported. Fitting points will be slower. "
"To improve performance, install the sklearn module.")
errfunc = _point_cloud_error
# for efficiency, define parameter specific error function
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
x0 = x0 or (0, 0, 0)
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 1):
x0 = x0 or (0, 0, 0, 1)
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 3):
x0 = x0 or (0, 0, 0, 1, 1, 1)
def error(x):
rx, ry, rz, sx, sy, sz = x
trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, True, 0):
x0 = x0 or (0, 0, 0, 0, 0, 0)
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
err = errfunc(est[:, :3], tgt_pts)
return err
else:
raise NotImplementedError(
"The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
msg)
if out == 'params':
return est
elif out == 'trans':
return _trans_from_params(param_info, est)
else:
raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
"""Find paths to label files in a subject's label directory
Parameters
----------
subject : str
Name of the mri subject.
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "aparc/*.label" will find all labels
in the "subject/label/aparc" directory). With None, find all labels.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
    -------
paths : list
List of paths relative to the subject's label directory
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = os.path.join(subjects_dir, subject)
lbl_dir = os.path.join(subject_dir, 'label')
if pattern is None:
paths = []
for dirpath, _, filenames in os.walk(lbl_dir):
rel_dir = os.path.relpath(dirpath, lbl_dir)
for filename in fnmatch.filter(filenames, '*.label'):
path = os.path.join(rel_dir, filename)
paths.append(path)
else:
paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
return paths
def _find_mri_paths(subject='fsaverage', subjects_dir=None):
"""Find all files of an mri relevant for source transformation
Parameters
----------
subject : str
Name of the mri subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(sys.environ['SUBJECTS_DIR'])
Returns
-------
    paths : dict
Dictionary whose keys are relevant file type names (str), and whose
values are lists of paths.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = {}
# directories to create
paths['dirs'] = [bem_dirname, surf_dirname]
# surf/ files
paths['surf'] = surf = []
surf_fname = os.path.join(surf_dirname, '{name}')
surf_names = ('inflated', 'sphere', 'sphere.reg', 'white')
if os.getenv('_MNE_FEW_SURFACES', '') != 'true': # for testing
surf_names = surf_names + (
'orig', 'orig_avg', 'inflated_avg', 'inflated_pre', 'pial',
'pial_avg', 'smoothwm', 'white_avg', 'sphere.reg.avg')
for name in surf_names:
for hemi in ('lh.', 'rh.'):
fname = pformat(surf_fname, name=hemi + name)
surf.append(fname)
# BEM files
paths['bem'] = bem = []
path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
bem.append('head')
bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
subject=subject, name='*-bem')
re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
name='(.+)')
for path in iglob(bem_pattern):
match = re.match(re_pattern, path)
name = match.group(1)
bem.append(name)
# fiducials
paths['fid'] = [fid_fname]
# duplicate curvature files
paths['duplicate'] = dup = []
path = os.path.join(surf_dirname, '{name}')
for name in ['lh.curv', 'rh.curv']:
fname = pformat(path, name=name)
dup.append(fname)
# check presence of required files
for ftype in ['surf', 'fid', 'duplicate']:
for fname in paths[ftype]:
path = fname.format(subjects_dir=subjects_dir, subject=subject)
path = os.path.realpath(path)
if not os.path.exists(path):
raise IOError("Required file not found: %r" % path)
# find source space files
paths['src'] = src = []
bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
prefix = subject + '-'
for fname in fnames:
if fname.startswith(prefix):
fname = "{subject}-%s" % fname[len(prefix):]
path = os.path.join(bem_dirname, fname)
src.append(path)
return paths
def _is_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is an mri subject directory
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_mri_subject : bool
Whether ``subject`` is an mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if not os.path.exists(fname):
return False
return True
def _mri_subject_has_bem(subject, subjects_dir=None):
"""Check whether an mri subject has a file matching the bem pattern
Parameters
----------
subject : str
Name of the subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
has_bem_file : bool
Whether ``subject`` has a bem file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
name='*-bem')
fnames = glob(pattern)
return bool(len(fnames))
def read_mri_cfg(subject, subjects_dir=None):
"""Read information from the cfg file of a scaled MRI brain
Parameters
----------
subject : str
Name of the scaled MRI subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
cfg : dict
Dictionary with entries from the MRI's cfg file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
if not os.path.exists(fname):
raise IOError("%r does not seem to be a scaled mri subject: %r does "
"not exist." % (subject, fname))
logger.info("Reading MRI cfg file %s" % fname)
config = configparser.RawConfigParser()
config.read(fname)
n_params = config.getint("MRI Scaling", 'n_params')
if n_params == 1:
scale = config.getfloat("MRI Scaling", 'scale')
elif n_params == 3:
scale_str = config.get("MRI Scaling", 'scale')
scale = np.array([float(s) for s in scale_str.split()])
else:
raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
'n_params': n_params, 'scale': scale}
return out
def _write_mri_config(fname, subject_from, subject_to, scale):
"""Write the cfg file describing a scaled MRI subject
Parameters
----------
fname : str
Target file.
subject_from : str
Name of the source MRI subject.
subject_to : str
Name of the scaled MRI subject.
scale : float | array_like, shape = (3,)
The scaling parameter.
"""
scale = np.asarray(scale)
if np.isscalar(scale) or scale.shape == ():
n_params = 1
else:
n_params = 3
config = configparser.RawConfigParser()
config.add_section("MRI Scaling")
config.set("MRI Scaling", 'subject_from', subject_from)
config.set("MRI Scaling", 'subject_to', subject_to)
config.set("MRI Scaling", 'n_params', str(n_params))
if n_params == 1:
config.set("MRI Scaling", 'scale', str(scale))
else:
config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
config.set("MRI Scaling", 'version', '1')
with open(fname, 'w') as fid:
config.write(fid)
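# For reference, the cfg file written above has a plain ini layout along these
# lines (subject names and scale value are illustrative):
#
#     [MRI Scaling]
#     subject_from = fsaverage
#     subject_to = my_scaled_subject
#     n_params = 1
#     scale = 0.9
#     version = 1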
def _scale_params(subject_to, subject_from, scale, subjects_dir):
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if (subject_from is None) != (scale is None):
raise TypeError("Need to provide either both subject_from and scale "
"parameters, or neither.")
if subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
subject_from = cfg['subject_from']
n_params = cfg['n_params']
scale = cfg['scale']
else:
scale = np.asarray(scale)
if scale.ndim == 0:
n_params = 1
elif scale.shape == (3,):
n_params = 3
else:
raise ValueError("Invalid shape for scale parameer. Need scalar "
"or array of length 3. Got %s." % str(scale))
return subjects_dir, subject_from, n_params, scale
def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
subjects_dir=None):
"""Scale a bem file
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
bem_name : str
Name of the bem file. For example, to scale
``fsaverage-inner_skull-bem.fif``, the bem_name would be
"inner_skull-bem".
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
Scaling factor. Has to be specified if subjects_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
subjects_dir, subject_from, _, scale = _scale_params(subject_to,
subject_from, scale,
subjects_dir)
src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
name=bem_name)
dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
name=bem_name)
if os.path.exists(dst):
raise IOError("File alredy exists: %s" % dst)
surfs = read_bem_surfaces(src)
if len(surfs) != 1:
raise NotImplementedError("BEM file with more than one surface: %r"
% src)
surf0 = surfs[0]
surf0['rr'] = surf0['rr'] * scale
write_bem_surfaces(dst, surf0)
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
scale=None, subjects_dir=None):
"""Scale labels to match a brain that was previously created by scaling
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination brain).
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "lh.BA3a.label" will scale
"fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels
in the "fsaverage/label/aparc" directory). With None, scale all labels.
overwrite : bool
Overwrite any label file that already exists for subject_to (otherwise
        existing labels are skipped).
subject_from : None | str
Name of the original MRI subject (the brain that was scaled to create
subject_to). If None, the value is read from subject_to's cfg file.
scale : None | float | array_like, shape = (3,)
Scaling parameter. If None, the value is read from subject_to's cfg
file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
# read parameters from cfg
if scale is None or subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
if subject_from is None:
subject_from = cfg['subject_from']
if scale is None:
scale = cfg['scale']
# find labels
paths = _find_label_paths(subject_from, pattern, subjects_dir)
if not paths:
return
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_root = os.path.join(subjects_dir, subject_from, 'label')
dst_root = os.path.join(subjects_dir, subject_to, 'label')
# scale labels
for fname in paths:
dst = os.path.join(dst_root, fname)
if not overwrite and os.path.exists(dst):
continue
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
src = os.path.join(src_root, fname)
l_old = read_label(src)
pos = l_old.pos * scale
l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,
l_old.comment, subject=subject_to)
l_new.save(dst)
def scale_mri(subject_from, subject_to, scale, overwrite=False,
subjects_dir=None):
"""Create a scaled copy of an MRI subject
Parameters
----------
subject_from : str
Name of the subject providing the MRI.
subject_to : str
New subject name for which to save the scaled MRI.
scale : float | array_like, shape = (3,)
The scaling factor (one or 3 parameters).
overwrite : bool
If an MRI already exists for subject_to, overwrite it.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
See Also
--------
scale_labels : add labels to a scaled MRI
scale_source_space : add a source space to a scaled MRI
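    Notes
    -----
    Minimal usage sketch (subject names, scale factors and the subjects
    directory are hypothetical)::
        scale_mri('fsaverage', 'sub01_scaled', scale=(0.95, 0.97, 0.94),
                  overwrite=True, subjects_dir='/data/subjects')
    Surfaces, BEMs, fiducials, source spaces and labels are all scaled in one
    pass, as implemented below.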
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = _find_mri_paths(subject_from, subjects_dir=subjects_dir)
scale = np.asarray(scale)
# make sure we have an empty target directory
dest = subject_dirname.format(subject=subject_to,
subjects_dir=subjects_dir)
if os.path.exists(dest):
if overwrite:
shutil.rmtree(dest)
else:
raise IOError("Subject directory for %s already exists: %r"
% (subject_to, dest))
for dirname in paths['dirs']:
dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
os.makedirs(dir_)
# save MRI scaling parameters
fname = os.path.join(dest, 'MRI scaling parameters.cfg')
_write_mri_config(fname, subject_from, subject_to, scale)
# surf files [in mm]
for fname in paths['surf']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
pts, tri = read_surface(src)
write_surface(dest, pts * scale, tri)
# BEM files [in m]
for bem_name in paths['bem']:
scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
# fiducials [in m]
for fname in paths['fid']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
pts, cframe = read_fiducials(src)
for pt in pts:
pt['r'] = pt['r'] * scale
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
write_fiducials(dest, pts, cframe)
# duplicate files
for fname in paths['duplicate']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
shutil.copyfile(src, dest)
# source spaces
for fname in paths['src']:
src_name = os.path.basename(fname)
scale_source_space(subject_to, src_name, subject_from, scale,
subjects_dir)
# labels [in m]
scale_labels(subject_to, subject_from=subject_from, scale=scale,
subjects_dir=subjects_dir)
def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
subjects_dir=None, n_jobs=1):
"""Scale a source space for an mri created with scale_mri()
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
src_name : str
Source space name. Can be a spacing parameter (e.g., ``'7'``,
``'ico4'``, ``'oct6'``) or a file name of a source space file relative
to the bem directory; if the file name contains the subject name, it
should be indicated as "{subject}" in ``src_name`` (e.g.,
``"{subject}-my_source_space-src.fif"``).
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
        Scaling factor. Has to be specified if subject_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
n_jobs : int
Number of jobs to run in parallel if recomputing distances (only
applies if scale is an array of length 3, and will not use more cores
than there are source spaces).
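    Notes
    -----
    Minimal usage sketch (the subject name is hypothetical): rescale the oct-6
    source space that exists for the original subject::
        scale_source_space('sub01_scaled', 'oct6')
    With subject_from and scale left as None, both are read from the scaled
    subject's MRI scaling config file.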
"""
subjects_dir, subject_from, n_params, scale = _scale_params(subject_to,
subject_from,
scale,
subjects_dir)
# find the source space file names
if src_name.isdigit():
spacing = src_name # spacing in mm
src_pattern = src_fname
else:
match = re.match("(oct|ico)-?(\d+)$", src_name)
if match:
spacing = '-'.join(match.groups())
src_pattern = src_fname
else:
spacing = None
src_pattern = os.path.join(bem_dirname, src_name)
src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
spacing=spacing)
dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
spacing=spacing)
# prepare scaling parameters
if n_params == 1:
norm_scale = None
elif n_params == 3:
norm_scale = 1. / scale
else:
raise RuntimeError("Invalid n_params entry in MRI cfg file: %s"
% str(n_params))
# read and scale the source space [in m]
sss = read_source_spaces(src)
logger.info("scaling source space %s: %s -> %s", spacing, subject_from,
subject_to)
logger.info("Scale factor: %s", scale)
add_dist = False
for ss in sss:
ss['subject_his_id'] = subject_to
ss['rr'] *= scale
# distances and patch info
if norm_scale is None:
if ss['dist'] is not None:
ss['dist'] *= scale
ss['nearest_dist'] *= scale
ss['dist_limit'] *= scale
else:
nn = ss['nn']
nn *= norm_scale
norm = np.sqrt(np.sum(nn ** 2, 1))
nn /= norm[:, np.newaxis]
if ss['dist'] is not None:
add_dist = True
if add_dist:
logger.info("Recomputing distances, this might take a while")
dist_limit = np.asscalar(sss[0]['dist_limit'])
add_source_space_distances(sss, dist_limit, n_jobs)
write_source_spaces(dst, sss)
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 67 | 9084 | import numpy as np
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
neurohackweek/kids_rsfMRI_motion | SCRIPTS/kidsmotion_plotting.py | 1 | 4401 | #!/usr/bin/env python
"""
Here are some useful plotting functions!
"""
#===============================================================================
# Import what you need
#===============================================================================
import matplotlib.pylab as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
import pandas as pd
import seaborn as sns
#===============================================================================
def get_min_max(data, pad=0.05):
"""
This function finds the minimum and maximum values for the data and then
pads values by pad (in fractional values, set to 5% as default) of the data
so you can have a little bit of space around the values in your plot.
"""
data_range = np.max(data) - np.min(data)
data_min = np.min(data) - (data_range * pad)
data_max = np.max(data) + (data_range * pad)
return data_min, data_max
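# Worked example (hypothetical values): for data spanning [0.0, 2.0] with the
# default pad=0.05 the range is 2.0, so get_min_max returns (-0.1, 2.1),
# i.e. 5% of the range added on each side.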
#===============================================================================
def histogram_motion(df):
"""
    This function plots histograms of the func_mean_fd and func_perc_fd values
for all participants in a data frame along with a scatter plot of these
two motion measures against each other.
Returns the fig and ax_list
"""
fig, ax_list = plt.subplots(1,3, figsize=(9,3))
ax_list[0] = sns.distplot(df['func_mean_fd'], ax=ax_list[0])
xmin, xmax = get_min_max(df['func_mean_fd'])
ax_list[0].set_xlim(xmin, xmax)
ax_list[1] = sns.distplot(df['func_perc_fd'], ax=ax_list[1])
xmin, xmax = get_min_max(df['func_perc_fd'])
ax_list[1].set_xlim(xmin, xmax)
ax_list[2] = sns.regplot(df['func_mean_fd'], df['func_perc_fd'], fit_reg=False, scatter_kws={'s': 3}, ax=ax_list[2])
xmin, xmax = get_min_max(df['func_mean_fd'])
ymin, ymax = get_min_max(df['func_perc_fd'])
ax_list[2].set_xlim(xmin, xmax)
ax_list[2].set_ylim(ymin, ymax)
for ax in ax_list:
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
sns.despine()
plt.tight_layout()
return fig, ax_list
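# Example call (illustrative; assumes `df` is a phenotype DataFrame with
# 'func_mean_fd' and 'func_perc_fd' columns):
#     fig, axes = histogram_motion(df)
#     fig.savefig('motion_histograms.png', dpi=150)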
#===============================================================================
def corr_motion_age(df, fit_reg=True):
"""
    This function correlates age and motion (both func_mean_fd and func_perc_fd)
for all participants in a data frame.
Returns the fig and ax_list
"""
fig, ax_list = plt.subplots(1,2, figsize=(6,3))
for i, motion_measure in enumerate([ 'func_mean_fd', 'func_perc_fd']):
ax_list[i] = sns.regplot(df['AGE_AT_SCAN'], df[motion_measure],
fit_reg=fit_reg,
scatter_kws={'s': 3}, ax=ax_list[i])
xmin, xmax = get_min_max(df['AGE_AT_SCAN'])
ymin, ymax = get_min_max(df[motion_measure])
ax_list[i].set_xlim(xmin, xmax)
ax_list[i].set_ylim(ymin, ymax)
for ax in ax_list:
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
sns.despine()
plt.tight_layout()
return fig, ax_list
#===============================================================================
def compare_groups_boxplots(corr_df, title=None):
"""
This function plots the output of the compare_groups function (which
doesn't exist yet) as swarm and boxplots showing the correlation with a
particular variable.
    corr_df is made up of rows containing the r values from a number of
    permutations and columns defined according to different ways of selecting
    the groups.
"""
fig, ax = plt.subplots(figsize=(5,3))
ax = sns.boxplot(data=corr_df,
orient='v',
ax=ax,
linewidth=2,
color='w',
width=0.5)
ax = sns.swarmplot(data=corr_df,
orient='v',
ax=ax,
s=2)
ax.axhline(c='k', lw=0.5, ls='--')
sns.despine()
if title:
ax.text(0.95, 0.95, title,
horizontalalignment='right',
verticalalignment='bottom',
transform = ax.transAxes)
    # Make the plot look pretty by limiting the number of ticks on the
    # y-axis
ax.yaxis.set_major_locator(MaxNLocator(5))
plt.tight_layout()
return fig, ax
| mit |
luhaofang/tripletloss | tripletloss/norm2layer.py | 1 | 1368 | # --------------------------------------------------------
# TRIPLET LOSS
# Copyright (c) 2015 Pinguo Tech.
# Written by David Lu
# --------------------------------------------------------
"""The L2 normalization (norm2) layer used when training a VGG_FACE network with triplet loss.
"""
import caffe
import numpy as np
from numpy import *
import yaml
from multiprocessing import Process, Queue
from caffe._caffe import RawBlobVec
from sklearn import preprocessing
class Norm2Layer(caffe.Layer):
"""norm2 layer used for L2 normalization."""
    def setup(self, bottom, top):
        """Setup the Norm2Layer: reshape the top blob to match the bottom blob's shape."""
top[0].reshape(bottom[0].num, shape(bottom[0].data)[1])
    def forward(self, bottom, top):
        """L2-normalize each row of the bottom blob and copy the result into the top blob."""
minibatch_db = []
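        # Each bottom row x is rescaled to x / ||x||_2 below, so every top row
        # has unit Euclidean norm (e.g. [3., 4.] -> [0.6, 0.8]).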
for i in range((bottom[0]).num):
X_normalized = preprocessing.normalize(bottom[0].data[i].reshape(1,-1), norm='l2')[0]
minibatch_db.append(X_normalized)
#print 'bottom**:',np.dot(bottom[0].data[0],bottom[0].data[0])
top[0].data[...] = minibatch_db
    def backward(self, top, propagate_down, bottom):
        """This layer does not need to backpropagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
| mit |
gfyoung/pandas | pandas/core/arrays/sparse/scipy_sparse.py | 4 | 5379 | """
Interaction with scipy.sparse matrices.
Currently only includes to_coo helpers.
"""
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.series import Series
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError("Is not a partition because intersection is not null.")
if set.union(*parts) != whole:
raise ValueError("Is not a partition because union is not the whole.")
def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
For arbitrary (MultiIndexed) sparse Series return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor.
"""
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the sparse Series: get the labels and data for non-null entries
values = ss.array._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
# TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
"""
Return dict of unique labels to number.
Optionally sort by label.
"""
labels = Index(map(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(labels)
return {k: i for i, k in enumerate(labels)}
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
ilabels = list(zip(*[index._get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
else:
labels_to_i.index = Index(x[0] for x in labels_to_i.index)
labels_to_i.index.name = index.names[subset[0]]
labels_to_i.name = "value"
return labels_to_i
labels_to_i = _get_index_subset_to_coord_dict(
ss.index, levels, sort_labels=sort_labels
)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Convert a sparse Series to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
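    A usage sketch (illustrative): for a sparse Series ``ss`` whose index is a
    two-level MultiIndex::
        A, rows, cols = sparse_series_to_coo(ss, row_levels=(0,),
                                             column_levels=(1,))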
"""
import scipy.sparse
if ss.index.nlevels < 2:
        raise ValueError("to_coo requires MultiIndex with nlevels >= 2")
if not ss.index.is_unique:
raise ValueError(
"Duplicate index entries are not allowed in to_coo transformation."
)
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(
ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns))
)
return sparse_matrix, rows, columns
def coo_to_sparse_series(A, dense_index: bool = False):
"""
Convert a scipy.sparse.coo_matrix to a SparseSeries.
Parameters
----------
A : scipy.sparse.coo.coo_matrix
dense_index : bool, default False
Returns
-------
Series
Raises
------
TypeError if A is not a coo_matrix
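    Notes
    -----
    Usage sketch (illustrative)::
        import scipy.sparse
        A = scipy.sparse.coo_matrix(([1.0, 2.0], ([0, 1], [1, 2])),
                                    shape=(3, 4))
        s = coo_to_sparse_series(A)  # sparse Series indexed by (row, col)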
"""
from pandas import SparseDtype
try:
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
except AttributeError as err:
raise TypeError(
f"Expected coo_matrix. Got {type(A).__name__} instead."
) from err
s = s.sort_index()
s = s.astype(SparseDtype(s.dtype))
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex(ind)
return s
| bsd-3-clause |
linebp/pandas | pandas/tests/io/test_common.py | 2 | 8235 | """
Tests for the pandas.io.common functionalities
"""
import mmap
import pytest
import os
from os.path import isabs
import pandas as pd
import pandas.util.testing as tm
from pandas.io import common
from pandas.compat import is_platform_windows, StringIO
from pandas import read_csv, concat
try:
from pathlib import Path
except ImportError:
pass
try:
from py.path import local as LocalPath
except ImportError:
pass
class CustomFSPath(object):
"""For testing fspath on unknown objects"""
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
HERE = os.path.dirname(__file__)
class TestCommonIOCapabilities(object):
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_expand_user(self):
filename = '~/sometest'
expanded_name = common._expand_user(filename)
assert expanded_name != filename
assert isabs(expanded_name)
assert os.path.expanduser(filename) == expanded_name
def test_expand_user_normal_path(self):
filename = '/somefolder/sometest'
expanded_name = common._expand_user(filename)
assert expanded_name == filename
assert os.path.expanduser(filename) == expanded_name
def test_stringify_path_pathlib(self):
tm._skip_if_no_pathlib()
rel_path = common._stringify_path(Path('.'))
assert rel_path == '.'
redundant_path = common._stringify_path(Path('foo//bar'))
assert redundant_path == os.path.join('foo', 'bar')
def test_stringify_path_localpath(self):
tm._skip_if_no_localpath()
path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
assert common._stringify_path(lpath) == abs_path
def test_stringify_path_fspath(self):
p = CustomFSPath('foo/bar.csv')
result = common._stringify_path(p)
assert result == 'foo/bar.csv'
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
assert filepath_or_buffer != filename
assert isabs(filepath_or_buffer)
assert os.path.expanduser(filename) == filepath_or_buffer
def test_get_filepath_or_buffer_with_buffer(self):
input_buffer = StringIO()
filepath_or_buffer, _, _ = common.get_filepath_or_buffer(input_buffer)
assert filepath_or_buffer == input_buffer
def test_iterator(self):
reader = read_csv(StringIO(self.data1), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(self.data1))
tm.assert_frame_equal(result, expected)
# GH12153
it = read_csv(StringIO(self.data1), chunksize=1)
first = next(it)
tm.assert_frame_equal(first, expected.iloc[[0]])
tm.assert_frame_equal(concat(it), expected.iloc[1:])
@pytest.mark.parametrize('reader, module, path', [
(pd.read_csv, 'os', os.path.join(HERE, 'data', 'iris.csv')),
(pd.read_table, 'os', os.path.join(HERE, 'data', 'iris.csv')),
(pd.read_fwf, 'os', os.path.join(HERE, 'data',
'fixed_width_format.txt')),
(pd.read_excel, 'xlrd', os.path.join(HERE, 'data', 'test1.xlsx')),
(pd.read_feather, 'feather', os.path.join(HERE, 'data',
'feather-0_3_1.feather')),
(pd.read_hdf, 'tables', os.path.join(HERE, 'data', 'legacy_hdf',
'datetimetz_object.h5')),
(pd.read_stata, 'os', os.path.join(HERE, 'data', 'stata10_115.dta')),
(pd.read_sas, 'os', os.path.join(HERE, 'sas', 'data',
'test1.sas7bdat')),
(pd.read_json, 'os', os.path.join(HERE, 'json', 'data',
'tsframe_v012.json')),
(pd.read_msgpack, 'os', os.path.join(HERE, 'msgpack', 'data',
'frame.mp')),
(pd.read_pickle, 'os', os.path.join(HERE, 'data',
'categorical_0_14_1.pickle')),
])
def test_read_fspath_all(self, reader, module, path):
pytest.importorskip(module)
mypath = CustomFSPath(path)
result = reader(mypath)
expected = reader(path)
if path.endswith('.pickle'):
# categorical
tm.assert_categorical_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('writer_name, writer_kwargs, module', [
('to_csv', {}, 'os'),
('to_excel', {'engine': 'xlwt'}, 'xlwt'),
('to_feather', {}, 'feather'),
('to_html', {}, 'os'),
('to_json', {}, 'os'),
('to_latex', {}, 'os'),
('to_msgpack', {}, 'os'),
('to_pickle', {}, 'os'),
('to_stata', {}, 'os'),
])
def test_write_fspath_all(self, writer_name, writer_kwargs, module):
p1 = tm.ensure_clean('string')
p2 = tm.ensure_clean('fspath')
df = pd.DataFrame({"A": [1, 2]})
with p1 as string, p2 as fspath:
pytest.importorskip(module)
mypath = CustomFSPath(fspath)
writer = getattr(df, writer_name)
writer(string, **writer_kwargs)
with open(string, 'rb') as f:
expected = f.read()
writer(mypath, **writer_kwargs)
with open(fspath, 'rb') as f:
result = f.read()
assert result == expected
def test_write_fspath_hdf5(self):
# Same test as write_fspath_all, except HDF5 files aren't
# necessarily byte-for-byte identical for a given dataframe, so we'll
# have to read and compare equality
pytest.importorskip('tables')
df = pd.DataFrame({"A": [1, 2]})
p1 = tm.ensure_clean('string')
p2 = tm.ensure_clean('fspath')
with p1 as string, p2 as fspath:
mypath = CustomFSPath(fspath)
df.to_hdf(mypath, key='bar')
df.to_hdf(string, key='bar')
result = pd.read_hdf(fspath, key='bar')
expected = pd.read_hdf(string, key='bar')
tm.assert_frame_equal(result, expected)
class TestMMapWrapper(object):
def setup_method(self, method):
self.mmap_file = os.path.join(tm.get_data_path(),
'test_mmap.csv')
def test_constructor_bad_file(self):
non_file = StringIO('I am not a file')
non_file.fileno = lambda: -1
# the error raised is different on Windows
if is_platform_windows():
msg = "The parameter is incorrect"
err = OSError
else:
msg = "[Errno 22]"
err = mmap.error
tm.assert_raises_regex(err, msg, common.MMapWrapper, non_file)
target = open(self.mmap_file, 'r')
target.close()
msg = "I/O operation on closed file"
tm.assert_raises_regex(
ValueError, msg, common.MMapWrapper, target)
def test_get_attr(self):
with open(self.mmap_file, 'r') as target:
wrapper = common.MMapWrapper(target)
attrs = dir(wrapper.mmap)
attrs = [attr for attr in attrs
if not attr.startswith('__')]
attrs.append('__next__')
for attr in attrs:
assert hasattr(wrapper, attr)
assert not hasattr(wrapper, 'foo')
def test_next(self):
with open(self.mmap_file, 'r') as target:
wrapper = common.MMapWrapper(target)
lines = target.readlines()
for line in lines:
next_line = next(wrapper)
assert next_line.strip() == line.strip()
pytest.raises(StopIteration, next, wrapper)
def test_unknown_engine(self):
with tm.ensure_clean() as path:
df = tm.makeDataFrame()
df.to_csv(path)
with tm.assert_raises_regex(ValueError, 'Unknown engine'):
read_csv(path, engine='pyt')
| bsd-3-clause |
ujjvala-addsol/addsol_hr | openerp/addons/resource/faces/timescale.py | 170 | 3902 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
def __init__(self, calendar):
self.data_calendar = calendar
self._create_chart_calendar()
self.now = self.to_num(self.data_calendar.now)
def to_datetime(self, xval):
return xval.to_datetime()
def to_num(self, date):
return self.chart_calendar.WorkingDate(date)
def is_free_slot(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1 != dt2
def is_free_day(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1.date() != dt2.date()
def _create_chart_calendar(self):
dcal = self.data_calendar
ccal = self.chart_calendar = pcal.Calendar()
ccal.minimum_time_unit = 1
#pad worktime slots of calendar (all days should be equally long)
slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
day_sum = lambda day: slot_sum(dcal.get_working_times(day))
max_work_time = max(map(day_sum, range(7)))
#working_time should have 2/3
sum_time = 3 * max_work_time / 2
#now create timeslots for ccal
def create_time_slots(day):
src_slots = dcal.get_working_times(day)
slots = [0, src_slots, 24*60]
slots = tuple(cbook.flatten(slots))
slots = zip(slots[:-1], slots[1:])
#balance non working slots
work_time = slot_sum(src_slots)
non_work_time = sum_time - work_time
non_slots = filter(lambda s: s not in src_slots, slots)
non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
non_slots.sort()
slots = []
i = 0
for l, s in non_slots:
delta = non_work_time / (len(non_slots) - i)
delta = min(l, delta)
non_work_time -= delta
slots.append((s[0], s[0] + delta))
i += 1
slots.extend(src_slots)
slots.sort()
return slots
min_delta = sys.maxint
for i in range(7):
slots = create_time_slots(i)
ccal.working_times[i] = slots
min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
ccal._recalc_working_time()
self.slot_delta = min_delta
self.day_delta = sum_time
self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
drammock/mne-python | examples/decoding/decoding_spatio_temporal_source.py | 17 | 4713 | """
.. _tut_dec_st_source:
==========================
Decoding source space data
==========================
Decoding applied to MEG data in source space on the left cortical surface. Here,
univariate feature selection is employed for speed, confining the
classification to a small number of potentially relevant features. The
classifier is then trained on the selected features of the source-space epochs.
"""
# sphinx_gallery_thumbnail_number = 2
# Author: Denis A. Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Jean-Remi King <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
import mne
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.decoding import (cross_val_multiscore, LinearModel, SlidingEstimator,
get_coef)
print(__doc__)
data_path = mne.datasets.sample.data_path()
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.8
event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(None, 10., fir_design='firwin')
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443'] # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6),
decim=5) # decimate to save memory and increase speed
###############################################################################
# Compute inverse solution
snr = 3.0
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
stcs = apply_inverse_epochs(epochs, inverse_operator,
lambda2=1.0 / snr ** 2, verbose=False,
method="dSPM", pick_ori="normal")
###############################################################################
# Decoding in source space using a logistic regression
# Retrieve source space data into an array
X = np.array([stc.lh_data for stc in stcs]) # only keep left hemisphere
y = epochs.events[:, 2]
# prepare a series of classifier applied at each time sample
clf = make_pipeline(StandardScaler(), # z-score normalization
SelectKBest(f_classif, k=500), # select features for speed
LinearModel(LogisticRegression(C=1, solver='liblinear')))
time_decod = SlidingEstimator(clf, scoring='roc_auc')
# Run cross-validated decoding analyses:
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
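# `scores` has shape (n_splits, n_times): one ROC-AUC value per cross-validation
# split and time sample, so averaging over axis 0 below gives the mean score at
# each time point.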
# Plot average decoding scores of 5 splits
fig, ax = plt.subplots(1)
ax.plot(epochs.times, scores.mean(0), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.axvline(0, color='k')
plt.legend()
###############################################################################
# To investigate weights, we need to retrieve the patterns of a fitted model
# The fitting needs not be cross validated because the weights are based on
# the training sets
time_decod.fit(X, y)
# Retrieve patterns after inversing the z-score normalization step:
patterns = get_coef(time_decod, 'patterns_', inverse_transform=True)
stc = stcs[0] # for convenience, lookup parameters from first stc
vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi
stc_feat = mne.SourceEstimate(np.abs(patterns), vertices=vertices,
tmin=stc.tmin, tstep=stc.tstep, subject='sample')
brain = stc_feat.plot(views=['lat'], transparent=True,
initial_time=0.1, time_unit='s',
subjects_dir=subjects_dir)
| bsd-3-clause |
danviv/trading-with-python | nautilus/nautilus.py | 77 | 5403 | '''
Created on 26 dec. 2011
Copyright: Jev Kuznetsov
License: BSD
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ib.ext.Contract import Contract
from ib.opt import ibConnection
from ib.ext.Order import Order
import tradingWithPython.lib.logger as logger
from tradingWithPython.lib.eventSystem import Sender, ExampleListener
import tradingWithPython.lib.qtpandas as qtpandas
import numpy as np
import pandas
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
class PriceListener(qtpandas.DataFrameModel):
def __init__(self):
super(PriceListener,self).__init__()
self._header = ['position','bid','ask','last']
def addSymbol(self,symbol):
data = dict(zip(self._header,[0,np.nan,np.nan,np.nan]))
row = pandas.DataFrame(data, index = pandas.Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
def priceHandler(self,sender,event,msg=None):
if msg['symbol'] not in self.df.index:
self.addSymbol(msg['symbol'])
if msg['type'] in self._header:
self.df.ix[msg['symbol'],msg['type']] = msg['price']
self.signalUpdate()
#print self.df
class Broker(Sender):
def __init__(self, name = "broker"):
super(Broker,self).__init__()
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self._id2symbol = {} # id-> symbol dict
self.tws = None
self._nextId = 1 # tws subscription id
self.nextValidOrderId = None
def connect(self):
""" connect to tws """
self.tws = ibConnection() # tws interface
self.tws.registerAll(self._defaultHandler)
self.tws.register(self._nextValidIdHandler,'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True,'')
self.tws.register(self._priceHandler,'TickPrice')
def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'):
''' subscribe to stock data '''
self.log.debug('Subscribing to '+symbol)
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self._nextId
self._nextId += 1
self.tws.reqMktData(subId,c,'',False)
self._id2symbol[subId] = c.m_symbol
self.contracts[symbol]=c
def disconnect(self):
self.tws.disconnect()
#------event handlers--------------------
def _defaultHandler(self,msg):
''' default message handler '''
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def _nextValidIdHandler(self,msg):
self.nextValidOrderId = msg.orderId
self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId))
def _priceHandler(self,msg):
#translate to meaningful messages
message = {'symbol':self._id2symbol[msg.tickerId],
'price':msg.price,
'type':priceTicks[msg.field]}
self.dispatch('price',message)
#-----------------GUI elements-------------------------
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("print selected rows")
Action.triggered.connect(self.printName)
menu.exec_(event.globalPos())
def printName(self):
print "Action triggered from " + self.name
print 'Selected :'
for idx in self.selectionModel().selectedRows():
print self.model().df.ix[idx.row(),:]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.broker = Broker()
self.price = PriceListener()
self.broker.connect()
symbols = ['SPY','XLE','QQQ','VXX','XIV']
for symbol in symbols:
self.broker.subscribeStk(symbol)
self.broker.register(self.price.priceHandler, 'price')
widget = TableView(parent=self)
widget.setModel(self.price)
widget.horizontalHeader().setResizeMode(QHeaderView.Stretch)
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
def __del__(self):
print 'Disconnecting.'
self.broker.disconnect()
if __name__=="__main__":
print "Running nautilus"
import sys
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
print "All done." | bsd-3-clause |
madjam/mxnet | example/reinforcement-learning/ddpg/strategies.py | 42 | 2473 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
class BaseStrategy(object):
"""
Base class of exploration strategy.
"""
def get_action(self, obs, policy):
raise NotImplementedError
def reset(self):
pass
class OUStrategy(BaseStrategy):
"""
Ornstein-Uhlenbeck process: dxt = theta * (mu - xt) * dt + sigma * dWt
where Wt denotes the Wiener process.
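    evolve_state() below applies an Euler step of this SDE with dt = 1,
        x <- x + theta * (mu - x) + sigma * N(0, 1),
    which drifts the state back towards mu while injecting Gaussian noise.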
"""
def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
self.mu = mu
self.theta = theta
self.sigma = sigma
self.action_space = env_spec.action_space
self.state = np.ones(self.action_space.flat_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
def reset(self):
self.state = np.ones(self.action_space.flat_dim) * self.mu
def get_action(self, obs, policy):
# get_action accepts a 2D tensor with one row
obs = obs.reshape((1, -1))
action = policy.get_action(obs)
increment = self.evolve_state()
return np.clip(action + increment,
self.action_space.low,
self.action_space.high)
if __name__ == "__main__":
class Env1(object):
def __init__(self):
self.action_space = Env2()
class Env2(object):
def __init__(self):
self.flat_dim = 2
env_spec = Env1()
test = OUStrategy(env_spec)
states = []
for i in range(1000):
states.append(test.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| apache-2.0 |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part04-e11_below_zero/test/test_below_zero.py | 1 | 1339 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
import pandas as pd
import re
import os
from tmc import points
from tmc.utils import load, get_out, patch_helper
module_name="src.below_zero"
below_zero = load(module_name, "below_zero")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p04-11.1')
class BelowZero(unittest.TestCase):
def test_value(self):
ret_val = below_zero()
self.assertEqual(ret_val, 49, msg="Incorrect return value!")
def test_output(self):
main()
out = get_out()
pattern = r"Number of days below zero:\s+\d+"
self.assertRegex(out, pattern, msg="Output is not in correct form!")
def test_called(self):
with patch(ph("below_zero"), wraps=below_zero) as pbz,\
patch(ph("pd.read_csv"), wraps=pd.read_csv) as prc:
main()
pbz.assert_called_once()
prc.assert_called_once()
args, kwargs = prc.call_args
self.assertEqual(os.path.basename(args[0]),
"kumpula-weather-2017.csv", msg="Wrong filename given to read_csv!")
if "sep" in kwargs:
self.assertEqual(kwargs["sep"], ",", msg="Incorrect separator in call to read_csv!")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |