repo_name (string, 6–112 chars) | path (string, 4–204 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 714–810k chars) | license (15 classes)
---|---|---|---|---|---
AnasGhrab/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for the
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
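# The fitted decision boundary satisfies w[0]*x + w[1]*y + intercept = 0;
# rearranging gives the line y = -w[0]/w[1] * x - intercept/w[1], which is
# what the slope/intercept computation below plots.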
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
MurpheyLab/trep | trep/discopt/doptimizer.py | 1 | 24014 | import time
import datetime
import numpy as np
import trep
import dlqr
import numpy.linalg
from numpy import dot
from collections import namedtuple
try:
import matplotlib.pyplot as pyplot
pyplot_available = True
except ImportError:
pyplot_available = False
class DOptimizerMonitor(object):
"""
This is the base class for Optimizer Monitors. It does absolutely
nothing, so you can use this as your monitor if you want
completely silent operation.
"""
def optimize_begin(self, X, U):
"""
Called when DOptimizer.optimize() is called with the initial
trajectory.
"""
pass
def optimize_end(self, converged, X, U, cost):
"""
Called before DOptimizer.optimize() returns with the results
of the optimization.
"""
pass
def step_begin(self, iteration):
"""
Called at the start of each DOptimizer.step(). Note that step
calls itself with the new method when one method fails, so
this might be called multiple times with the same iteration.
All calls will be related to the same iteration until
step_termination or step_completed are called.
"""
pass
def step_info(self, method, cost, dcost, X, U, dX, dU, Kproj):
"""
Called after a descent direction has been calculated.
"""
pass
def step_method_failure(self, method, cost, dcost, fallback_method):
"""
Called when a descent method results in a positive cost
derivative.
"""
pass
def step_termination(self, cost, dcost):
"""
Called if dcost satisfies the descent tolerance, indicating
that the current trajectory is a local minimizer.
"""
pass
def step_completed(self, method, cost, nX, nU):
"""
Called at the end of an optimization step with information
about the new trajectory.
"""
pass
def armijo_simulation_failure(self, armijo_iteration, nX, nU, bX, bU):
"""
Called when a simulation fails (usually an instability) during
the evaluation of the cost in an armijo step. The Armijo
search continues after this.
"""
pass
def armijo_search_failure(self, X, U, dX, dU, cost0, dcost0, Kproj):
"""
Called when the Armijo search reaches the maximum number of
iterations without satisfying the sufficient decrease
criteria. The optimization cannot proceed after this.
"""
pass
def armijo_evaluation(self, armijo_iteration, nX, nU, bX, bU, cost, max_cost):
"""
Called after each Armijo evaluation. The semi-trajectory
bX,bU was successfully projected into the new trajectory nX,nU
and its cost was measured. The search will continue if the
cost is greater than the maximum cost.
"""
pass
class DOptimizerDefaultMonitor(DOptimizerMonitor):
"""
This is the default DOptimizer Monitor. It mainly prints status
updates to stdout and records the cost and dcost history.
"""
def __init__(self):
self.iteration = 0
self.start_cost = 0
self.start_dcost = 0
self.method = ''
self.start_time = None
self.cost_history = {}
self.dcost_history = {}
def msg(self, msg):
if self.start_time is not None:
delta = datetime.datetime.now() - self.start_time
timestamp = time.strftime("+[%H:%M:%S]",time.gmtime(delta.seconds))
else:
timestamp = datetime.datetime.now().strftime('[%H:%M:%S]')
print "%s %3d: %s" % (timestamp, self.iteration, msg)
def optimize_begin(self, X, U):
self.cost_history = {}
self.dcost_history = {}
self.start_time = datetime.datetime.now()
def optimize_end(self, converged, X, U, cost):
print ""
self.start_time = None
def step_begin(self, iteration):
self.iteration = iteration
def step_info(self, method, cost, dcost, X, U, dX, dU, Kproj):
self.start_cost = cost
self.start_dcost = dcost
self.method = method
self.cost_history[self.iteration] = cost
self.dcost_history[self.iteration] = dcost
def step_method_failure(self, method, cost, dcost, fallback_method):
self.msg("Descent method %r failed (dcost=%s), fallbacking to %s" % (
self.method, dcost, fallback_method))
def step_termination(self, cost, dcost):
self.msg("Optimization Terminated. cost=%s dcost=%s" % (cost, dcost))
def step_completed(self, method, cost, nX, nU):
self.msg("cost=(%s => %s) dcost=%s method=%s armijo=%d" % (
self.start_cost, cost, self.start_dcost, method, self.armijo))
def armijo_simulation_failure(self, armijo_iteration, nX, nU, bX, bU):
self.msg(" Armijo simulation (%d) failed after %d steps." % (
armijo_iteration, len(nX)))
def armijo_search_failure(self, X, U, dX, dU, cost0, dcost0, Kproj):
pass
def armijo_evaluation(self, armijo_iteration, nX, nU, bX, bU, cost, max_cost):
self.armijo = armijo_iteration
def get_costs(self):
return [self.cost_history[x] for x in sorted(self.cost_history.keys())]
def get_dcosts(self):
return [self.dcost_history[x] for x in sorted(self.dcost_history.keys())]
class DOptimizerVerboseMonitor(DOptimizerDefaultMonitor):
"""
The verbose DOptimizer Monitor prints more information.
"""
def optimize_begin(self, X, U):
self.msg("Optimization starting at %s" % datetime.datetime.now().strftime('[%H:%M:%S]'))
super(DOptimizerVerboseMonitor, self).optimize_begin(X, U)
def optimize_end(self, converged, X, U, cost):
self.msg("Optimization completed at %s" % datetime.datetime.now().strftime('[%H:%M:%S]'))
super(DOptimizerVerboseMonitor, self).optimize_end(converged, X, U, cost)
def step_info(self, method, cost, dcost, X, U, dX, dU, Kproj):
self.msg("Current Trajectory cost: %f, dcost: %f, method=%s" % (cost, dcost, method))
super(DOptimizerVerboseMonitor, self).step_info(method, cost, dcost, X, U, dX, dU, Kproj)
def armijo_evaluation(self, armijo_iteration, nX, nU, bX, bU, cost, max_cost):
if cost >= max_cost:
self.msg(" Armijo evaluation (%d) is too expensive (%f >= %f)" % (
armijo_iteration, cost, max_cost))
super(DOptimizerVerboseMonitor, self).armijo_evaluation(
armijo_iteration, nX, nU, bX, bU, cost, max_cost)
class DOptimizer(object):
def __init__(self, dsys, cost,
first_method_iterations=10,
monitor=None):
self.dsys = dsys
self.cost = cost
self.optimize_ic = False
if monitor is None:
self.monitor = DOptimizerDefaultMonitor()
else:
self.monitor = monitor
# Default weights used to generate feedback controller for the
# projection. These can be changed if desired.
Qproj = np.eye(self.dsys.nX)
Rproj = np.eye(self.dsys.nU)
self.Qproj = lambda k: Qproj
self.Rproj = lambda k: Rproj
self.armijo_beta = 0.7
self.armijo_alpha = 0.00001
self.armijo_max_iterations = 30
self.descent_tolerance = 1e-6
# Number of first order iterations to do at the start of an optimization
self.first_method_iterations = first_method_iterations
self.first_method = 'quasi'
self.second_method = 'newton'
# Named tuples types for function returns
self.step_return = namedtuple('step', 'done nX nU dcost0 cost1')
self.optimize_return = namedtuple('optimize', 'converged X U')
self.check_dcost_return = namedtuple('check_dcost', 'result error cost1 cost0 approx_dcost exact_dcost')
self.check_ddcost_return = namedtuple('check_ddcost', 'result error cost1 cost0 approx_ddcost exact_ddcost')
self.model_return = namedtuple('descent_model', 'Q R S')
self.descent_return = namedtuple('calc_descent_direction', 'Kproj dX dU Q R S')
self.armijo_simulate_return = namedtuple('armijo_simulate', 'success nX nU')
self.armijo_search_return = namedtuple('armijo_search', 'nX nU cost1')
def calc_cost(self, X, U):
"""Calculate the cost of a trajectory X,U."""
cost = 0.0
for k in range(len(X)-1):
cost += self.cost.l(X[k], U[k], k)
cost += self.cost.m(X[-1])
return cost
def calc_dcost(self, X, U, dX, dU):
"""
Calculate the derivative of the cost function evaluated at X,U
in the direction of a tangent trajectory dX,dU.
"""
dcost = 0.0
for k in range(len(X)-1):
dcost += (dot(self.cost.l_dx(X[k], U[k], k), dX[k]) +
dot(self.cost.l_du(X[k], U[k], k), dU[k]))
dcost += dot(self.cost.m_dx(X[-1]), dX[-1])
return dcost
def calc_ddcost(self, X, U, dX, dU, Q, R, S):
"""
Calculate the second derivative of the cost function evaluated
at X,U in the direction of a tangent trajectory dX,dU.
The second order model parameters must be specified in Q,R,S.
These can be obtained through DOptimizer.calc_newton_model()
or by DOptimizer.calc_descent_direction() when method='newton'.
"""
ddcost = 0.0
for k in range(len(X) - 1):
ddcost += (dot(dX[k], dot(Q(k), dX[k])) +
2*dot(dX[k], dot(S(k), dU[k])) +
dot(dU[k], dot(R(k), dU[k])))
ddcost += dot(dot(dX[-1], Q(-1)), dX[-1])
return ddcost
def calc_steepest_model(self):
"""
Calculate a quadratic model to find a steepest descent
direction. This is simply Q=I, R=I, S=0.
"""
Q = np.eye(self.dsys.nX)
R = np.eye(self.dsys.nU)
S = np.zeros((self.dsys.nX, self.dsys.nU))
return self.model_return(lambda k: Q, lambda k: R, lambda k: S)
def calc_quasi_model(self, X, U):
"""
Calculate a quadratic model to find a quasi-newton descent
direction. This takes into account the derivative of the cost
function without considering the system dynamics.
"""
Q = [None]*len(X)
S = [None]*(len(X)-1)
R = [None]*(len(X)-1)
Q[-1] = self.cost.m_dxdx(X[-1])
for k in reversed(range(len(X)-1)):
Q[k] = self.cost.l_dxdx(X[k], U[k], k)
S[k] = self.cost.l_dxdu(X[k], U[k], k)
R[k] = self.cost.l_dudu(X[k], U[k], k)
return self.model_return(lambda k: Q[k], lambda k: R[k], lambda k: S[k])
def calc_newton_model(self, X, U, A, B, K):
"""
Calculate a quadratic model to find a newton descent
direction. This solves the backwards discrete adjoint
equation.
"""
Q = [None]*len(X)
S = [None]*(len(X)-1)
R = [None]*(len(X)-1)
z = self.cost.m_dx(X[-1])
Q[-1] = self.cost.m_dxdx(X[-1])
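# Backward sweep: z plays the role of the adjoint (costate) vector. It is
# seeded with the terminal cost gradient above and propagated backwards
# through the closed-loop linearization (A - B*K), while the second-order
# dynamics terms fdxdx/fdxdu/fdudu(z) correct the quadratic model.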
for k in reversed(range(len(X)-1)):
Q[k] = self.cost.l_dxdx(X[k], U[k], k)
S[k] = self.cost.l_dxdu(X[k], U[k], k)
R[k] = self.cost.l_dudu(X[k], U[k], k)
self.dsys.set(X[k], U[k], k,
xk_hint=X[k+1])
Q[k] += self.dsys.fdxdx(z)
S[k] += self.dsys.fdxdu(z)
R[k] += self.dsys.fdudu(z)
z = (self.cost.l_dx(X[k], U[k], k) -
dot(self.cost.l_du(X[k], U[k], k), K[k]) +
dot(z, (A[k] - dot(B[k], K[k]))))
return self.model_return(lambda k: Q[k], lambda k: R[k], lambda k: S[k])
def calc_descent_direction(self, X, U, method='steepest'):
"""
Calculate the descent direction from the trajectory X,U using
the specified method. Valid methods are:
'steepest'
'quasi'
'newton'
The method returns the tuple (Kproj, dX, dU, Q, R, S).
"""
(Kproj, A, B) = self.dsys.calc_feedback_controller(X, U,
self.Qproj, self.Rproj,
True)
# All descent direction methods use the same linear cost
# terms.
q = np.zeros(X.shape)
r = np.zeros(U.shape)
for k in xrange(len(X)-1):
q[k] = self.cost.l_dx(X[k], U[k], k)
r[k] = self.cost.l_du(X[k], U[k], k)
q[-1] = self.cost.m_dx(X[-1])
# Calculate the quadratic model according to the desired
# method.
if method == 'steepest':
(Q,R,S) = self.calc_steepest_model()
elif method == 'quasi':
(Q,R,S) = self.calc_quasi_model(X, U)
elif method == 'newton':
(Q,R,S) = self.calc_newton_model(X, U, A, B, Kproj)
else:
raise StandardError("Invalid descent direction method: %r" % method)
(K,C,P,b) = dlqr.solve_tv_lq(A, B, q, r, Q, S, R)
# If the optimization includes initial conditions, we need to
# find an initial condition that minimizes the LQ solution.
# This currently is only valid for unconstrained systems.
if self.optimize_ic:
dx0 = -np.linalg.solve(P, b)
else:
dx0 = np.zeros((self.dsys.nX,))
# Calculate the descent direction by simulating the linearized
# system using the LQ solution's optimal input.
dX = np.zeros(X.shape)
dU = np.zeros(U.shape)
dX[0] = dx0
for k in xrange(len(X)-1):
dU[k] = -dot(K[k],dX[k]) - C[k]
dX[k+1] = dot(A[k],dX[k]) + dot(B[k],dU[k])
return self.descent_return(Kproj, dX, dU, Q, R, S)
def armijo_simulate(self, bX, bU, Kproj):
"""
This is a sub-function for armijo search. It projects the
trajectory bX,bU to a real trajectory like DSystem.project,
but it also returns a partial trajectory if the simulation
fails.
"""
# If still spending a lot of time in armijo search, move the
# cost comparison to this loop so we can abort the simulation
# as soon as possible.
nX = np.zeros(bX.shape)
nU = np.zeros(bU.shape)
try:
nX[0] = bX[0]
for k in range(len(bX)-1):
nU[k] = bU[k] - dot(Kproj[k], nX[k] - bX[k])
if k == 0:
self.dsys.set(nX[k], nU[k], k)
else:
self.dsys.step(nU[k])
nX[k+1] = self.dsys.f()
except trep.ConvergenceError:
return (False, nX[:k], nU[:k])
return self.armijo_simulate_return(True, nX, nU)
def armijo_search(self, X, U, Kproj, dX, dU):
"""
Perform an Armijo line search from the trajectory X,U along
the tangent trajectory dX, dU. Returns the tuple (nX, nU,
nCost).
"""
cost0 = self.calc_cost(X, U)
dcost0 = self.calc_dcost(X, U, dX, dU)
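# Sufficient-decrease (Armijo) test: accept the step size lam = beta**m as
# soon as the projected trajectory's cost falls below
# cost0 + armijo_alpha * lam * dcost0, i.e. a fixed fraction of the decrease
# predicted by the directional derivative dcost0.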
for m in range(0, self.armijo_max_iterations):
lam = self.armijo_beta**m
max_cost = cost0 + self.armijo_alpha* lam * dcost0
bX = X + lam*dX
bU = U + lam*dU
(result, nX, nU) = self.armijo_simulate(bX, bU, Kproj)
if not result:
self.monitor.armijo_simulation_failure(m, nX, nU, bX, bU)
continue
cost1 = self.calc_cost(nX, nU)
self.monitor.armijo_evaluation(m, nX, nU, bX, bU, cost1, max_cost)
if cost1 < max_cost:
return self.armijo_search_return(nX, nU, cost1)
else:
self.monitor.armijo_search_failure(X, U, dX, dU, cost0, dcost0, Kproj)
raise trep.ConvergenceError("Armijo Failed to Converge")
def step(self, iteration, X, U, method='steepest'):
"""
Perform an optimization step.
Find a new trajectory nX, nU that has a lower cost than the
trajectory X,U. Valid methods are defined in
DOptimizer.calc_descent_direction().
Returns the named tuple (done, nX, nU, dcost0, cost1) where:
'done' is a boolean that is True if the trajectory X,U cannot
be improved (i.e., X,U is a local minimizer)
nX,nU are the improved trajectory
dcost0 is the derivative of the cost at X,U
cost1 is the cost of the improved trajectory.
"""
self.monitor.step_begin(iteration)
(Kproj, dX, dU, Q, R, S) = self.calc_descent_direction(X, U, method)
cost0 = self.calc_cost(X, U)
dcost0 = self.calc_dcost(X, U, dX, dU)
self.monitor.step_info(method, cost0, dcost0, X, U, dX, dU, Kproj)
# Check for sane descent direction
if dcost0 > 0:
fallback = self.select_fallback_method(iteration, method)
self.monitor.step_method_failure(method, cost0, dcost0, fallback)
return self.step(iteration, X, U, fallback)
# Check for terminal condition
if abs(dcost0) < self.descent_tolerance:
self.monitor.step_termination(cost0, dcost0)
return self.step_return(True, X, U, dcost0, cost0)
# Line search in descent direction
(X, U, cost1) = self.armijo_search(X, U, Kproj, dX, dU)
self.monitor.step_completed(method, cost1, X, U)
return self.step_return(False, X, U, dcost0, cost1)
def select_method(self, iteration):
"""
Select a descent direction method for the specified iteration.
This is called by optimize() to choose a descent direction
method for each step. The default implementation takes
'self.first_method_iterations' steps of 'self.first_method'
and then switches to 'self.second_method' steps.
"""
if iteration < self.first_method_iterations:
method = self.first_method
else:
method = self.second_method
return method
def select_fallback_method(self, iteration, current_method):
"""
When DOptimizer.step() finds a bad descent direction (e.g.,
positive cost derivative), it calls this method to figure out
what descent direction it should use next.
"""
if current_method == 'newton':
return 'quasi'
elif current_method == 'quasi':
return 'steepest'
else:
# This should never occur
raise StandardError("Derivative of cost is positive for steepest descent.")
def optimize(self, X, U, max_steps=50):
"""
Iteratively optimize the trajectory X,U.
This function calls DOptimizer.step() until a local minimizer
is found or 'max_steps' iterations have been taken.
Returns the named tuple (converged, X, U) where:
converged is a boolean indicating if the optimization finished
on a local minimizer.
X,U is the improved trajectory.
"""
X = np.array(X)
U = np.array(U)
self.monitor.optimize_begin(X, U)
for i in range(max_steps):
method = self.select_method(i)
(converged, X, U, dcost, cost) = self.step(i, X, U, method)
if converged:
break
self.monitor.optimize_end(converged, X, U, cost)
return self.optimize_return(converged, X, U)
def descent_plot(self, X, U, method='steepest', points=40, legend=True):
"""
Create a descent direction plot at X,U for the specified method.
"""
if not pyplot_available:
raise StandardError("Importing matplotlib failed. Cannot create plot.")
(Kproj, dX, dU, Q, R, S) = self.calc_descent_direction(X, U, method)
def calc_cost(zi):
return self.calc_cost(*self.dsys.project(X + zi*dX, U + zi*dU, Kproj))
armijo_z = np.array(sorted([self.armijo_beta**m for m in range(20)]))
z = np.linspace(-0.1, 1.01, points)
z = np.concatenate((z, armijo_z))
z.sort()
# Used to calculate the model costs
cost = self.calc_cost(X, U)
dcost = self.calc_dcost(X, U, dX, dU)
ddcost = self.calc_ddcost(X, U, dX, dU, Q, R, S)
true_cost = np.zeros(z.shape)
model_cost = np.zeros(z.shape)
for i,zi in enumerate(z):
true_cost[i] = calc_cost(zi)
model_cost[i] = cost + dcost*zi + 0.5*ddcost*zi*zi
armijo_cost = np.zeros(armijo_z.shape)
for i,zi in enumerate(armijo_z):
armijo_cost[i] = calc_cost(zi)
armijo_max = np.zeros(z.shape)
for i,zi in enumerate(z):
armijo_max[i] = cost + self.armijo_alpha* zi * dcost
pyplot.hold(True)
pyplot.plot(z, model_cost-cost, '-,', linewidth=2.0, color='blue', label='Modeled Cost')
pyplot.plot(z, true_cost-cost, '.-', linewidth=1.0, color='black', label='True Cost')
pyplot.plot(armijo_z, armijo_cost-cost, 'o', color='gray', label='Armijo Evaluations')
pyplot.plot(z, armijo_max-cost, '-.', color='black', label='Required Cost Improvement')
pyplot.hold(False)
if legend:
pyplot.legend(loc=0)
pyplot.title('Cost along descent direction for method: "%s".' % method)
pyplot.xlabel('z')
pyplot.ylabel(r'$\Delta$ cost')
def check_dcost(self, X, U, method='steepest', delta=1e-6, tolerance=1e-5):
"""
Check the calculated derivative of the cost function at X,U
with a numeric approximation determined from the original cost
function.
"""
(Kproj, dX, dU, Q, R, S) = self.calc_descent_direction(X, U, method)
exact_dcost = self.calc_dcost(X, U, dX, dU)
nX, nU = self.dsys.project(X - delta*dX, U - delta*dU, Kproj)
cost0 = self.calc_cost(nX, nU)
nX, nU = self.dsys.project(X + delta*dX, U + delta*dU, Kproj)
cost1 = self.calc_cost(nX, nU)
approx_dcost = (cost1 - cost0)/(2*delta)
error = approx_dcost - exact_dcost
result = (abs(error) <= tolerance)
return self.check_dcost_return(result, error, cost1, cost0, approx_dcost, exact_dcost)
def check_ddcost(self, X, U, method='steepest', delta=1e-6, tolerance=1e-5):
"""
Check the second derivative of the cost function at X,U with a
numeric approximation determined from the first derivative.
"""
(Kproj, dX, dU, Q, R, S) = self.calc_descent_direction(X, U, method)
if method != 'newton':
(Q, R, S) = self.calc_descent_direction(X, U, 'newton')[-3:]
exact_ddcost = self.calc_ddcost(X, U, dX, dU, Q, R, S)
# Calculate cost0
bX = X - delta*dX
bU = U - delta*dU
nX, nU = self.dsys.project(bX, bU, Kproj)
(A, B) = self.dsys.linearize_trajectory(nX, nU)
(ndX, ndU) = self.dsys.dproject(A, B, dX, dU, Kproj)
dcost0 = self.calc_dcost(nX, nU, ndX, ndU)
# Calculate cost1
bX = X + delta*dX
bU = U + delta*dU
nX, nU = self.dsys.project(bX, bU, Kproj)
(A, B) = self.dsys.linearize_trajectory(nX, nU)
(ndX, ndU) = self.dsys.dproject(A, B, dX, dU, Kproj)
dcost1 = self.calc_dcost(nX, nU, ndX, ndU)
approx_ddcost = (dcost1 - dcost0)/(2*delta)
error = approx_ddcost - exact_ddcost
result = (abs(error) <= tolerance)
return self.check_ddcost_return(result, error, dcost1, dcost0, approx_ddcost, exact_ddcost)
| gpl-3.0 |
danxhuber/k2epic | checkk2fov/projection.py | 1 | 11989 | try:
import matplotlib.pyplot as mp
except ImportError:
pass
import numpy as np
import rotate
__version__ = "$Id: projection.py 36 2014-01-23 22:19:15Z fergalm $"
__URL__ = "$URL: http://svn.code.sf.net/p/keplertwowheel/code/py/projection.py $"
class Projection():
"""Base Projection class. Used for mapping ra and dec into
Euclidean space based on a given projection.
The primary reference for projections is Calabretta and Greisen
(2002), A&A 395, 1077
The base class implements the Plate Carree projection (\S 5.2.3)
which just maps ra dec to xy -- i.e what you would blindly do
if you knew no different. If distortion is not a concern
this is an acceptable approach
"""
def __init__(self):
pass
def skyToPix(self, ra_deg, dec_deg):
return ra_deg, dec_deg
def pixToSky(self, x, y):
return x, y
def eulerRotate(self, ra_deg, dec_deg):
ra_deg, dec_deg = self.parseInputs(ra_deg, dec_deg)
#Transform ra dec into angle away from tangent point
#using the rotation matrix
theta_rad= np.empty( (len(ra_deg),) )
phi_rad = theta_rad * 0
R = self.Rmatrix
for i in range(len(ra_deg)):
#Convert the ra/dec to a vector, then rotate so
#that the tangent point is at [1,0,0]. Then pull out
#the angle relative to the x-axis, and the angle
#around the y-z plane.
#@TODO: Can I make this faster with dot products?
vec =rotate.vecFromRaDec(ra_deg[i], dec_deg[i])
aVec = np.dot(R, vec)
#aVec = (sint, cost*cosp, cost*sinp)
sint = aVec[0]
cost = np.hypot(aVec[1], aVec[2])
theta = np.arctan2(sint, cost)
cost = np.cos(theta)
cosp = aVec[1] / cost
sinp = aVec[2] / cost
phi = np.arctan2(sinp, cosp)
if phi < 0:
phi += 2*np.pi
if phi > 2*np.pi:
phi -= 2*np.pi
#Just to be explicit
theta_rad[i] = theta
phi_rad[i] = phi
return theta_rad, phi_rad
def parseInputs(self, ra_deg, dec_deg):
try:
len(ra_deg)
except TypeError:
ra_deg = np.array([ra_deg])
try:
len(dec_deg)
except TypeError:
dec_deg = np.array([dec_deg])
#If ra/dec aren't arrays, make them arrays
if not isinstance(ra_deg, np.ndarray):
ra_deg = np.array(ra_deg)
if not isinstance(dec_deg, np.ndarray):
dec_deg = np.array(dec_deg)
if np.logical_xor(len(ra_deg) == 1, len(dec_deg) == 1):
if len(ra_deg) == 1:
ra_deg = dec_deg *0 + ra_deg[0]
else:
dec_deg = ra_deg * 0 + dec_deg[0]
if len(ra_deg) != len(dec_deg):
raise ValueError("Input ra and dec arrays must be same length")
return ra_deg, dec_deg
def plot(self, ra_deg, dec_deg, *args, **kwargs):
x,y = self.skyToPix(ra_deg, dec_deg)
try:
plot_degrees = kwargs.pop('plot_degrees')
except KeyError:
plot_degrees=False
if plot_degrees:
x,y = np.degrees(x), np.degrees(y)
self._plot(x, y, *args, **kwargs)
def scatter(self, ra_deg, dec_deg, *args, **kwargs):
x,y = self.skyToPix(ra_deg, dec_deg)
mp.scatter(x,y, *args, **kwargs)
def text(self, ra_deg, dec_deg, s, *args, **kwargs):
x,y = self.skyToPix(ra_deg, dec_deg)
mp.text(x, y, s, *args, **kwargs)
def plotGrid(self, lineWidth=1, stepInDegrees=15, colour="#777777", \
raRange=[0,360], decRange=[-90, 90]):
ra0, ra1 = raRange
dec0, dec1 = decRange
step=stepInDegrees
c = colour
ra_deg = np.arange(ra0-1*step, ra1+1.5*step, 1, dtype=np.float)
for dec in np.arange(dec0, dec1+ 1*step, step):
self.plotLine(ra_deg, dec, '-', color=c, linewidth=lineWidth)
dec = np.arange(dec0-step, dec1+1.5*step, 1, dtype=float)
for ra in np.arange(ra0, ra1+step, step):
self.plotLine(ra, dec, '-', color=c, linewidth=lineWidth)
##Useful for debugging
#self.plotLine(0, dec,'r-', linewidth=lineWidth)
#self.plotLine(180, dec,'c-', linewidth=lineWidth)
def plotLine(self, ra_deg, dec_deg, *args, **kwargs):
ra_deg, dec_deg = self.parseInputs(ra_deg, dec_deg)
x,y = self.skyToPix(ra_deg, dec_deg)
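# Split the polyline wherever consecutive projected points jump by much
# more than the typical spacing (e.g. when a great circle wraps around the
# edge of the map), so that no spurious line is drawn across the figure.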
diffX = np.abs(np.diff(x))
idx1 = diffX > 3*np.mean(diffX)
idx1[idx1 + 1] = True
diffY = np.abs(np.diff(y))
idx2 = diffY > 3*np.mean(diffY)
j = 0
i0 = 0
if len(idx2) > 0:
idx2[-1] = True
idx = np.where(np.logical_or(idx1, idx2))[0]
for j in range(len(idx)):
i1 = idx[j]
self._plot(x[i0:i1], y[i0:i1], *args, **kwargs)
i0 = i1+1
def _plot(self, x, y, *args, **kwargs):
mp.plot(x,y, *args, **kwargs)
class PlateCaree(Projection):
"""Synonym for the base class"""
pass
class HammerAitoff(Projection):
def __init__(self, ra0_deg, dec0_deg):
Projection.__init__(self)
self.ra0_deg = ra0_deg
self.dec0_deg = dec0_deg
self.ra0_deg = ra0_deg
self.dec0_deg = dec0_deg
#This projection assumes ra ranges from -180 to +180
#if self.ra0_deg > 180:
#self.ra0_deg -= 360
#Construct rotation matrix used to convert ra/dec into
#angle relative to tangent point
Rdec = rotate.declinationRotationMatrix(-self.dec0_deg)
Rra = rotate.rightAscensionRotationMatrix(-self.ra0_deg)
self.Rmatrix = np.dot(Rra, Rdec)
def skyToPix(self, ra_deg, dec_deg):
sin = np.sin
cos = np.cos
#Parse inputs and allocate space for outputs
ra_deg, dec_deg = self.parseInputs(ra_deg, dec_deg)
long_deg = ra_deg * 0
lat_deg = long_deg * 0
#Get longitude and latitude relative to defined origin.
for i in range(len(ra_deg)):
vec = rotate.vecFromRaDec(ra_deg[i], dec_deg[i])
aVec = np.dot( self.Rmatrix, vec)
long_deg[i], lat_deg[i] = rotate.raDecFromVec(aVec)
long_deg = np.fmod(long_deg + 180, 360.)
long_rad = np.radians(long_deg) - np.pi #[-pi,pi]
lat_rad = np.radians(lat_deg)
#long_rad = np.fmod(long_rad+ np.pi, 2*np.pi)
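# Hammer-Aitoff projection as implemented here: with
# gamma = sqrt(2 / (1 + cos(lat)*cos(long/2))), the projected coordinates
# are x = -2*gamma*cos(lat)*sin(long/2) and y = gamma*sin(lat).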
gamma = 1 + cos(lat_rad)* cos(long_rad/2.)
gamma = np.sqrt(2/gamma)
x = -2*gamma*cos(lat_rad)*sin(long_rad/2)
y = gamma*sin(lat_rad)
return x, y
def pixToSky(self, x, y):
raise NotImplementedError("pixToSky not defined!")
class Gnomic(Projection):
def __init__(self, ra0_deg, dec0_deg):
self.ra0_deg = ra0_deg
self.dec0_deg = dec0_deg
#Construct rotation matrix used to convert ra/dec into
#angle relative to tangent point
Rdec = rotate.declinationRotationMatrix(-self.dec0_deg)
Rra = rotate.rightAscensionRotationMatrix(-self.ra0_deg)
self.Rmatrix = np.dot(Rdec, Rra)
#Check I created the matrix correctly.
origin = rotate.vecFromRaDec(self.ra0_deg, self.dec0_deg)
origin = np.dot(self.Rmatrix, origin)
assert( np.fabs(origin[0] -1 ) < 1e-9)
assert( np.fabs(origin[1]) < 1e-9)
assert( np.fabs(origin[2]) < 1e-9)
def skyToPix(self, ra_deg, dec_deg):
ra_deg, dec_deg = self.parseInputs(ra_deg, dec_deg)
#Transform ra dec into angle away from tangent point
#using the rotation matrix
theta_rad= np.empty( (len(ra_deg),) )
phi_rad = theta_rad * 0
R = self.Rmatrix
for i in range(len(ra_deg)):
#Convert the ra/dec to a vector, then rotate so
#that the tangent point is at [1,0,0]. Then pull out
#the angle relative to the x-axis, and the angle
#around the y-z plane.
#@TODO: Can I make this faster with dot products?
vec =rotate.vecFromRaDec(ra_deg[i], dec_deg[i])
aVec = np.dot(R, vec)
#aVec = (sint, cost*cosp, cost*sinp)
sint = aVec[0]
cost = np.hypot(aVec[1], aVec[2])
theta = np.arctan2(sint, cost)
cost = np.cos(theta)
cosp = aVec[1] / cost
sinp = aVec[2] / cost
phi = np.arctan2(sinp, cosp)
if phi < 0:
phi += 2*np.pi
if phi > 2*np.pi:
phi -= 2*np.pi
#Just to be explicit
theta_rad[i] = theta
phi_rad[i] = phi
#Project onto tangent plane
#theta_rad = np.pi/2. - theta_rad
r = 1/(np.tan(theta_rad) + 1e-10) #Prevent division by zero
x = r * np.cos(phi_rad)
y = r * np.sin(phi_rad)
#print x, y
return x, y
def pixToSky(self, x, y):
x, y = self.parseInputs(x, y)
R = self.Rmatrix
invR = np.matrix(R.transpose())
ra_deg = np.empty( (len(x),))
dec_deg = np.empty( (len(x),))
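# Inverse gnomonic mapping: the tangent-plane radius r corresponds to an
# angular separation theta from the tangent point with r = tan(theta), so
# theta = arctan(r); the unit vector built from (theta, phi) is then rotated
# back to sky coordinates with the transpose of the tangent-point rotation.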
for i in range(len(x)):
phi_rad = np.arctan2(y[i], x[i])
r = np.hypot(x[i], y[i])
theta_rad = np.arctan(r)
aVec = np.zeros((3,))
aVec[0] = np.cos(theta_rad)
aVec[1] = np.sin(theta_rad)*np.cos(phi_rad)
aVec[2] = np.sin(theta_rad)*np.sin(phi_rad)
vec = np.dot(invR, aVec)
vec = np.array(vec)[0] #Convert to 1d array
ra_deg[i], dec_deg[i] = rotate.raDecFromVec(vec)
return ra_deg, dec_deg
class Cylindrical(Projection):
"""Stunted cyclindical projection that assumes
projection point is always at sky point 0,0
"""
def __init__(self):
self.ra0_deg = 0
self.dec0_deg = 0
def skyToPix(self, ra_deg, dec_deg):
x = np.radians(ra_deg)
y = np.sin( np.radians(dec_deg))
return x, y
def pixToSky(self, x, y):
ra = np.degrees(x)
dec = np.degrees(np.arcsin(y))
return ra, dec
def main():
mp.clf()
p = HammerAitoff(45,23)
#p = Projection()
#import pdb; pdb.set_trace()
#print p.skyToPix(30,0)
#print p.skyToPix(30,30)
#print p.skyToPix(90, 30)
#print p.skyToPix(181,30)
#print p.skyToPix(270,0)
#ra = np.arange(-180, 180, 15)
#p.plot(ra, 0, 'r-')
#p.plot(ra, 30, 'g-')
#p.plot(ra, 45, 'b-')
#p.plot(ra, 75, 'c-')
#p.plot(ra, 90, 'k-')
#dec = np.arange(-90, 91, 5)
#p.plot(0, dec, 'r-')
#p.plot(45, dec, 'g-')
#p.plot(90, dec, 'b-')
#p.plot(135, dec, 'c-')
#p.plot(180, dec, 'k-')
#p.plot(225, dec, 'g-')
#p.plot(270, dec, 'b-')
#p.plot(315, dec, 'c-')
#p.plot(350, dec, 'k-')
#conv = {3: tools.toFloat}
#filename = "../twowheel/brighthd.txt"
#data = np.loadtxt(filename, delimiter=";", usecols=(0,1,2, 3), converters=conv)
#p.plot(data[:,0], data[:,1], 'k,')
p.plotGrid()
plotEcliptic(p)
##############################################################3
##############################################################3
##############################################################3
##############################################################3
def plotEcliptic(maptype=Projection()):
"""Plot Ra and Dec of ecliptic
Taken from http://www.dur.ac.uk/john.lucey/users/solar_year.html
His lambda is equal to ra, according to
http://www.princeton.edu/~achaney/tmve/wiki100k/docs/Ecliptic_coordinate_system.html
"""
ra = np.empty(360)
dec = np.empty(360)
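# Approximate ecliptic path: treating i as ecliptic longitude in degrees,
# ra = i + 2.45*sin(2i) and dec = 23.5*sin(i), i.e. a sinusoid whose
# amplitude equals the obliquity of the ecliptic (~23.5 degrees).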
for i in np.arange(360):
ra[i] = i + 2.45*np.sin (2 * i * np.pi/180.)
dec[i] =23.5*np.sin( i*np.pi/180.)
maptype.plotLine(ra, dec, 'r-', lw=4, label="Ecliptic")
| mit |
vibhorag/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
TaylorOshan/pysal | pysal/esda/tests/test_getisord.py | 6 | 2987 | import unittest
import numpy as np
from .. import getisord
from ...weights.Distance import DistanceBand
from ...common import pandas
POINTS = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
W = DistanceBand(POINTS, threshold=15)
Y = np.array([2, 3, 3.2, 5, 8, 7])
PANDAS_EXTINCT = pandas is None
class G_Tester(unittest.TestCase):
def setUp(self):
self.w = W
self.y = Y
np.random.seed(10)
def test_G(self):
g = getisord.G(self.y, self.w)
self.assertAlmostEquals(g.G, 0.55709779, places=8)
self.assertAlmostEquals(g.p_norm, 0.1729, places=4)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
np.random.seed(12345)
r1 = getisord.G.by_col(df, ['y'], w=self.w)
this_getisord = np.unique(r1.y_g.values)
this_pval = np.unique(r1.y_p_sim.values)
np.random.seed(12345)
stat = getisord.G(self.y, self.w)
self.assertAlmostEquals(this_getisord, stat._statistic)
self.assertAlmostEquals(this_pval, stat.p_sim)
class G_Local_Tester(unittest.TestCase):
def setUp(self):
self.w = W
self.y = Y
np.random.seed(10)
def test_G_Local_Binary(self):
lg = getisord.G_Local(self.y, self.w, transform='B')
self.assertAlmostEquals(lg.Zs[0], -1.0136729, places=7)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_Local_Row_Standardized(self):
lg = getisord.G_Local(self.y, self.w, transform='R')
self.assertAlmostEquals(lg.Zs[0], -0.62074534, places=7)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_star_Local_Binary(self):
lg = getisord.G_Local(self.y, self.w, transform='B', star=True)
self.assertAlmostEquals(lg.Zs[0], -1.39727626, places=8)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_star_Row_Standardized(self):
lg = getisord.G_Local(self.y, self.w, transform='R', star=True)
self.assertAlmostEquals(lg.Zs[0], -0.62488094, places=8)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
np.random.seed(12345)
r1 = getisord.G_Local.by_col(df, ['y'], w=self.w)
np.random.seed(12345)
stat = getisord.G_Local(self.y, self.w)
np.testing.assert_allclose(r1.y_g_local.values, stat.Gs)
np.testing.assert_allclose(r1.y_p_sim, stat.p_sim)
suite = unittest.TestSuite()
test_classes = [G_Tester, G_Local_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
Goodluckhf/chatbot | classificator/extra_metrics.py | 1 | 3183 | from __future__ import absolute_import
import six
from keras import backend as K
from keras.losses import mean_squared_error
from keras.losses import mean_absolute_error
from keras.losses import mean_absolute_percentage_error
from keras.losses import mean_squared_logarithmic_error
from keras.losses import hinge
from keras.losses import squared_hinge
from keras.losses import categorical_crossentropy
from keras.losses import sparse_categorical_crossentropy
from keras.losses import binary_crossentropy
from keras.losses import kullback_leibler_divergence
from keras.losses import poisson
from keras.losses import cosine_proximity
from keras.utils.generic_utils import deserialize_keras_object
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def fbeta_score(y_true, y_pred, beta=1):
"""Computes the F score.
The F score is the weighted harmonic mean of precision and recall.
Here it is only computed as a batch-wise average, not globally.
This is useful for multi-label classification, where input samples can be
classified as sets of labels. By only using accuracy (precision) a model
would achieve a perfect score by simply assigning every class to every
input. In order to avoid this, a metric should penalize incorrect class
assignments as well (recall). The F-beta score (ranged from 0.0 to 1.0)
computes this, as a weighted mean of the proportion of correct class
assignments vs. the proportion of incorrect class assignments.
With beta = 1, this is equivalent to a F-measure. With beta < 1, assigning
correct classes becomes more important, and with beta > 1 the metric is
instead weighted towards penalizing incorrect class assignments.
"""
if beta < 0:
raise ValueError('The lowest choosable beta is zero (only precision).')
# If there are no true positives, fix the F score at 0 like sklearn.
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
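# F_beta = (1 + beta**2) * (precision * recall) / (beta**2 * precision + recall);
# the epsilon term below guards against division by zero when both precision
# and recall are zero.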
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
def fmeasure(y_true, y_pred):
"""Computes the f-measure, the harmonic mean of precision and recall.
Here it is only computed as a batch-wise average, not globally.
"""
return fbeta_score(y_true, y_pred, beta=1) | apache-2.0 |
chrisnatali/networkx | examples/drawing/atlas.py | 54 | 2609 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas=graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U=nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree=[n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U=nx.disjoint_union(U,G)
# list of graphs of all connected components
C=nx.connected_component_subgraphs(U)
UU=nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist=[] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G,nlist):
nlist.append(G)
UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1,G2):
return True
return False
if __name__ == '__main__':
import networkx as nx
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
print(nx.number_connected_components(G),"connected components")
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1,figsize=(8,8))
# layout graphs with positions using graphviz neato
pos=nx.graphviz_layout(G,prog="neato")
# color nodes the same in each connected subgraph
C=nx.connected_component_subgraphs(G)
for g in C:
c=[random.random()]*nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png",dpi=75)
| bsd-3-clause |
gotomypc/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: they are based only on
cluster assignments rather than distances, and hence are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
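# Illustrative aside: since the k-means objective is non-convex, different
# random initializations can converge to different local optima, as noted in
# the docstring above. The number of restarts and the seeds below are
# arbitrary choices for demonstration only.
inertias = [KMeans(n_clusters=true_k, init='random', n_init=1, max_iter=100,
                   random_state=seed).fit(X).inertia_
            for seed in range(3)]
print("Inertia over 3 independent random inits: %s"
      % ", ".join("%.3f" % v for v in inertias))
print()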
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
craigcitro/pydatalab | datalab/stackdriver/monitoring/_metric.py | 5 | 3350 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Provides the MetricDescriptors in the monitoring API."""
from __future__ import absolute_import
from builtins import object
import fnmatch
import pandas
from . import _utils
class MetricDescriptors(object):
"""MetricDescriptors object for retrieving the metric descriptors."""
_DISPLAY_HEADERS = ('Metric type', 'Display name', 'Kind', 'Value', 'Unit',
'Labels')
def __init__(self, filter_string=None, type_prefix=None,
project_id=None, context=None):
"""Initializes the MetricDescriptors based on the specified filters.
Args:
      filter_string: An optional filter expression describing the metric
        descriptors to be returned.
type_prefix: An optional prefix constraining the selected metric types.
This adds ``metric.type = starts_with("<prefix>")`` to the filter.
project_id: An optional project ID or number to override the one provided
by the context.
context: An optional Context object to use instead of the global default.
"""
self._client = _utils.make_client(project_id, context)
self._filter_string = filter_string
self._type_prefix = type_prefix
self._descriptors = None
def list(self, pattern='*'):
"""Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters.
"""
if self._descriptors is None:
self._descriptors = self._client.list_metric_descriptors(
filter_string=self._filter_string, type_prefix=self._type_prefix)
return [metric for metric in self._descriptors
if fnmatch.fnmatch(metric.type, pattern)]
def as_dataframe(self, pattern='*', max_rows=None):
"""Creates a pandas dataframe from the descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*/cpu/load_??m"``.
max_rows: The maximum number of descriptors to return. If None, return
all.
Returns:
A pandas dataframe containing matching metric descriptors.
"""
data = []
for i, metric in enumerate(self.list(pattern)):
if max_rows is not None and i >= max_rows:
break
labels = ', '. join([l.key for l in metric.labels])
data.append([
metric.type, metric.display_name, metric.metric_kind,
metric.value_type, metric.unit, labels])
return pandas.DataFrame(data, columns=self._DISPLAY_HEADERS)
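# A minimal usage sketch; the metric-type prefix and patterns below are
# arbitrary examples, and the default project and credentials from the global
# context are assumed.
def _example_usage():
  descriptors = MetricDescriptors(type_prefix='compute.googleapis.com')
  # Shell-style wildcards narrow the listed descriptors further.
  for metric in descriptors.list(pattern='*cpu*'):
    print(metric.type)
  # The same matches can also be summarized as a pandas dataframe.
  return descriptors.as_dataframe(pattern='*cpu*', max_rows=10)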
| apache-2.0 |
gfyoung/scipy | scipy/ndimage/io.py | 8 | 1249 | from __future__ import division, print_function, absolute_import
import numpy as np
_have_pil = True
try:
from scipy.misc.pilutil import imread as _imread
except ImportError:
_have_pil = False
__all__ = ['imread']
# Use the implementation of `imread` in `scipy.misc.pilutil.imread`.
# If it weren't for the different names of the first arguments of
# ndimage.io.imread and misc.pilutil.imread, we could simplify this file
# by writing
# from scipy.misc.pilutil import imread
# Unfortunately, because the argument names are different, that
# introduces a backwards incompatibility.
@np.deprecate(message="`imread` is deprecated in SciPy 1.0.0.\n"
"Use ``matplotlib.pyplot.imread`` instead.")
def imread(fname, flatten=False, mode=None):
if _have_pil:
return _imread(fname, flatten, mode)
raise ImportError("Could not import the Python Imaging Library (PIL)"
" required to load image files. Please refer to"
" http://pillow.readthedocs.org/en/latest/installation.html"
" for installation instructions.")
if _have_pil and _imread.__doc__ is not None:
imread.__doc__ = _imread.__doc__.replace('name : str', 'fname : str')
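# A minimal sketch of the replacement suggested by the deprecation message
# above; the file name is an arbitrary example.
def _imread_with_matplotlib(fname='example.png'):
    import matplotlib.pyplot as plt
    return plt.imread(fname)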
| bsd-3-clause |
architecture-building-systems/CityEnergyAnalyst | cea/utilities/create_polygon.py | 2 | 1483 | """
Creates a polygon shapefile from a list of comma-separated coordinate tuples and places it in the building geometry folder
"""
import os
import cea.config
import cea.inputlocator
from cea.utilities.standardize_coordinates import get_geographic_coordinate_system
import geopandas as gpd
from shapely.geometry import Polygon
__author__ = "Reynold Mok"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Reynold Mok"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Reynold Mok"
__email__ = "[email protected]"
__status__ = "Production"
def create_polygon(coordinate_tuple_list, output_path, filename):
poly = Polygon(coordinate_tuple_list)
gdf = gpd.GeoDataFrame([{'geometry': poly}])
gdf.crs = get_geographic_coordinate_system()
# Make sure directory exists
if not os.path.exists(output_path):
os.makedirs(output_path)
gdf.to_file(os.path.join(output_path, '{filename}.shp'.format(filename=filename)))
print('Polygon `{filename}` created in {output_path}'.format(filename=filename, output_path=output_path))
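# A minimal usage sketch: the coordinates and output folder below are
# arbitrary assumptions rather than values taken from a CEA scenario.
def _example_square_polygon():
    square = [(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)]
    create_polygon(square, output_path='/tmp/cea_geometry', filename='site')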
def main(config):
coordinate_tuple_list = config.create_polygon.coordinates
filename = config.create_polygon.filename
locator = cea.inputlocator.InputLocator(config.scenario)
output_path = locator.get_building_geometry_folder()
create_polygon(coordinate_tuple_list, output_path, filename)
if __name__ == '__main__':
main(cea.config.Configuration())
| mit |
geodynamics/burnman | misc/benchmarks/olivine_phase_diagram_benchmark.py | 7 | 4021 | from __future__ import absolute_import
# Benchmarks for the chemical potential functions
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import burnman
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy import optimize
# Equilibrium functions
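# At a two-phase equilibrium the chemical potential of each endmember
# (Mg2SiO4 and Fe2SiO4) must be equal in both phases; the functions below
# return those differences so that scipy.optimize.fsolve can drive them to
# zero for the unknown pressure and compositions.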
def eqm_P_xMgB(A, B):
def eqm(arg, T, xMgA):
P = arg[0]
xMgB = arg[1]
A.set_composition([xMgA, 1.0 - xMgA])
A.set_state(P, T)
B.set_composition([xMgB, 1.0 - xMgB])
B.set_state(P, T)
diff_mu_Mg2SiO4 = A.partial_gibbs[0] - B.partial_gibbs[0]
diff_mu_Fe2SiO4 = A.partial_gibbs[1] - B.partial_gibbs[1]
return [diff_mu_Mg2SiO4, diff_mu_Fe2SiO4]
return eqm
def eqm_P_xMgABC(A, B, C):
def eqm(arg, T):
P = arg[0]
xMgA = arg[1]
xMgB = arg[2]
xMgC = arg[3]
A.set_composition([xMgA, 1.0 - xMgA])
A.set_state(P, T)
B.set_composition([xMgB, 1.0 - xMgB])
B.set_state(P, T)
C.set_composition([xMgC, 1.0 - xMgC])
C.set_state(P, T)
diff_mu_Mg2SiO4_0 = A.partial_gibbs[0] - B.partial_gibbs[0]
diff_mu_Fe2SiO4_0 = A.partial_gibbs[1] - B.partial_gibbs[1]
diff_mu_Mg2SiO4_1 = A.partial_gibbs[0] - C.partial_gibbs[0]
diff_mu_Fe2SiO4_1 = A.partial_gibbs[1] - C.partial_gibbs[1]
return [diff_mu_Mg2SiO4_0, diff_mu_Fe2SiO4_0, diff_mu_Mg2SiO4_1, diff_mu_Fe2SiO4_1]
return eqm
'''
Initialise solid solutions
'''
ol = burnman.minerals.SLB_2011.mg_fe_olivine()
wd = burnman.minerals.SLB_2011.mg_fe_wadsleyite()
rw = burnman.minerals.SLB_2011.mg_fe_ringwoodite()
'''
Temperature of phase diagram
'''
T = 1673. # K
'''
Find invariant point
'''
invariant = optimize.fsolve(
eqm_P_xMgABC(ol, wd, rw), [15.e9, 0.2, 0.3, 0.4], args=(T))
print(str(invariant[0] / 1.e9) + ' GPa')
print(invariant[1:4])
'''
Initialise arrays
'''
XMgA_ol_wad = np.linspace(invariant[1], 0.9999, 21)
XMgA_ol_rw = np.linspace(0.0001, invariant[1], 21)
XMgA_wad_rw = np.linspace(invariant[2], 0.9999, 21)
P_ol_wad = np.empty_like(XMgA_ol_wad)
XMgB_ol_wad = np.empty_like(XMgA_ol_wad)
P_ol_rw = np.empty_like(XMgA_ol_wad)
XMgB_ol_rw = np.empty_like(XMgA_ol_wad)
P_wad_rw = np.empty_like(XMgA_ol_wad)
XMgB_wad_rw = np.empty_like(XMgA_ol_wad)
'''
Find transition pressures
'''
for idx, XMgA in enumerate(XMgA_ol_wad):
XMgB_guess = 1.0 - ((1.0 - XMgA_ol_wad[idx]) * 0.8)
P_ol_wad[idx], XMgB_ol_wad[idx] = optimize.fsolve(
eqm_P_xMgB(ol, wd), [5.e9, XMgB_guess], args=(T, XMgA_ol_wad[idx]))
XMgB_guess = 1.0 - ((1.0 - XMgA_ol_rw[idx]) * 0.8)
P_ol_rw[idx], XMgB_ol_rw[idx] = optimize.fsolve(
eqm_P_xMgB(ol, rw), [5.e9, XMgB_guess], args=(T, XMgA_ol_rw[idx]))
XMgB_guess = 1.0 - ((1.0 - XMgA_wad_rw[idx]) * 0.8)
P_wad_rw[idx], XMgB_wad_rw[idx] = optimize.fsolve(
eqm_P_xMgB(wd, rw), [5.e9, XMgB_guess], args=(T, XMgA_wad_rw[idx]))
'''
Plot model
'''
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig10a.png')
plt.imshow(fig1, extent=[0, 1, 0., 30.], aspect='auto')
plt.plot(1.0 - np.array([invariant[1], invariant[2], invariant[3]]), np.array(
[invariant[0], invariant[0], invariant[0]]) / 1.e9, color='black', linewidth=2, label='invariant')
plt.plot(1.0 - XMgA_ol_wad, P_ol_wad / 1.e9,
'r-', linewidth=2, label='wad-out (ol, wad)')
plt.plot(1.0 - XMgB_ol_wad, P_ol_wad / 1.e9,
'g-', linewidth=2, label='ol-out (ol, wad)')
plt.plot(1.0 - XMgA_ol_rw, P_ol_rw / 1.e9,
'r-', linewidth=2, label='rw-out (ol, rw)')
plt.plot(1.0 - XMgB_ol_rw, P_ol_rw / 1.e9, 'b-',
linewidth=2, label='ol-out (ol, rw)')
plt.plot(1.0 - XMgA_wad_rw, P_wad_rw / 1.e9,
'g-', linewidth=2, label='rw-out (wad, rw)')
plt.plot(1.0 - XMgB_wad_rw, P_wad_rw / 1.e9,
'b-', linewidth=2, label='wad-out (wad, rw)')
plt.title('Mg2SiO4-Fe2SiO4 phase diagram')
plt.xlabel("X_Fe")
plt.ylabel("Pressure (GPa)")
plt.legend(loc='upper right')
plt.show()
| gpl-2.0 |
massmutual/scikit-learn | sklearn/metrics/tests/test_classification.py | 53 | 49781 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
    If binary is True, restrict to a binary classification problem instead of
    a multiclass classification problem.
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve.
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e460.py | 2 | 6598 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
N_SEQ_PER_BATCH = 64
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", None),
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
standardise_targets=True,
ignore_incomplete=True
# offset_probability=0.5,
# ignore_offset_activations=True
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
target_appliance=target_appliance,
logger=logging.getLogger(name),
seq_length=seq_length
))
source = SameLocation(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 8
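    # Shape flow through the layers below: the 1D convolution with 'valid'
    # border mode shortens each sequence by filter_length - 1 = 3 steps, the
    # dense layers form a 16-unit bottleneck, and the final DeConv1DLayer with
    # border_mode='full' restores the original sequence length.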
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'label': 'dense2',
'type': DenseLayer,
'num_units': 16,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, seq_length - 3, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def main():
APPLIANCES = [
('a', 'fridge freezer', 512),
('b', 'coffee maker', 512),
('c', 'dish washer', 2000),
('d', 'hair dryer', 256),
('e', 'kettle', 256),
('f', 'oven', 2000),
('g', 'toaster', 256),
('h', 'light', 2000),
('i', 'washer dryer', 2000)
]
for experiment, appliance, seq_length in APPLIANCES:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
func_call = func_call[:-1] + ", '{}', {})".format(appliance, seq_length)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=20000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e460.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
gengliangwang/spark | python/pyspark/pandas/spark/utils.py | 15 | 6409 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Helpers and utilities to deal with PySpark instances
"""
from typing import overload
from pyspark.sql.types import DecimalType, StructType, MapType, ArrayType, StructField, DataType
@overload
def as_nullable_spark_type(dt: StructType) -> StructType:
...
@overload
def as_nullable_spark_type(dt: ArrayType) -> ArrayType:
...
@overload
def as_nullable_spark_type(dt: MapType) -> MapType:
...
@overload
def as_nullable_spark_type(dt: DataType) -> DataType:
...
def as_nullable_spark_type(dt: DataType) -> DataType:
"""
Returns a nullable schema or data types.
Examples
--------
>>> from pyspark.sql.types import *
>>> as_nullable_spark_type(StructType([
... StructField("A", IntegerType(), True),
... StructField("B", FloatType(), False)])) # doctest: +NORMALIZE_WHITESPACE
StructType(List(StructField(A,IntegerType,true),StructField(B,FloatType,true)))
>>> as_nullable_spark_type(StructType([
... StructField("A",
... StructType([
... StructField('a',
... MapType(IntegerType(),
... ArrayType(IntegerType(), False), False), False),
... StructField('b', StringType(), True)])),
... StructField("B", FloatType(), False)])) # doctest: +NORMALIZE_WHITESPACE
StructType(List(StructField(A,StructType(List(StructField(a,MapType(IntegerType,ArrayType\
(IntegerType,true),true),true),StructField(b,StringType,true))),true),\
StructField(B,FloatType,true)))
"""
if isinstance(dt, StructType):
new_fields = []
for field in dt.fields:
new_fields.append(
StructField(
field.name,
as_nullable_spark_type(field.dataType),
nullable=True,
metadata=field.metadata,
)
)
return StructType(new_fields)
elif isinstance(dt, ArrayType):
return ArrayType(as_nullable_spark_type(dt.elementType), containsNull=True)
elif isinstance(dt, MapType):
return MapType(
as_nullable_spark_type(dt.keyType),
as_nullable_spark_type(dt.valueType),
valueContainsNull=True,
)
else:
return dt
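# Hedged usage sketch (not part of this module): a typical call site relaxes an
# existing DataFrame schema so rows with missing values can be appended. The
# DataFrame `sdf` and SparkSession `spark` below are assumed to already exist.
#
#     nullable_schema = as_nullable_spark_type(sdf.schema)  # StructType -> StructType
#     relaxed_sdf = spark.createDataFrame(sdf.rdd, schema=nullable_schema)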
@overload
def force_decimal_precision_scale(
dt: StructType, *, precision: int = ..., scale: int = ...
) -> StructType:
...
@overload
def force_decimal_precision_scale(
dt: ArrayType, *, precision: int = ..., scale: int = ...
) -> ArrayType:
...
@overload
def force_decimal_precision_scale(
dt: MapType, *, precision: int = ..., scale: int = ...
) -> MapType:
...
@overload
def force_decimal_precision_scale(
dt: DataType, *, precision: int = ..., scale: int = ...
) -> DataType:
...
def force_decimal_precision_scale(
dt: DataType, *, precision: int = 38, scale: int = 18
) -> DataType:
"""
Returns a data type with a fixed decimal type.
The precision and scale of the decimal type are fixed with the given values.
Examples
--------
>>> from pyspark.sql.types import *
>>> force_decimal_precision_scale(StructType([
... StructField("A", DecimalType(10, 0), True),
... StructField("B", DecimalType(14, 7), False)])) # doctest: +NORMALIZE_WHITESPACE
StructType(List(StructField(A,DecimalType(38,18),true),StructField(B,DecimalType(38,18),false)))
>>> force_decimal_precision_scale(StructType([
... StructField("A",
... StructType([
... StructField('a',
... MapType(DecimalType(5, 0),
... ArrayType(DecimalType(20, 0), False), False), False),
... StructField('b', StringType(), True)])),
... StructField("B", DecimalType(30, 15), False)]),
... precision=30, scale=15) # doctest: +NORMALIZE_WHITESPACE
StructType(List(StructField(A,StructType(List(StructField(a,MapType(DecimalType(30,15),\
ArrayType(DecimalType(30,15),false),false),false),StructField(b,StringType,true))),true),\
StructField(B,DecimalType(30,15),false)))
"""
if isinstance(dt, StructType):
new_fields = []
for field in dt.fields:
new_fields.append(
StructField(
field.name,
force_decimal_precision_scale(field.dataType, precision=precision, scale=scale),
nullable=field.nullable,
metadata=field.metadata,
)
)
return StructType(new_fields)
elif isinstance(dt, ArrayType):
return ArrayType(
force_decimal_precision_scale(dt.elementType, precision=precision, scale=scale),
containsNull=dt.containsNull,
)
elif isinstance(dt, MapType):
return MapType(
force_decimal_precision_scale(dt.keyType, precision=precision, scale=scale),
force_decimal_precision_scale(dt.valueType, precision=precision, scale=scale),
valueContainsNull=dt.valueContainsNull,
)
elif isinstance(dt, DecimalType):
return DecimalType(precision=precision, scale=scale)
else:
return dt
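# Hedged usage sketch (not part of this module): the two helpers compose, e.g.
# to pin every DecimalType in an existing schema to (38, 18) and make every
# field nullable before unioning DataFrames whose schemas differ slightly.
# `sdf` below is an assumed, pre-existing DataFrame.
#
#     target_schema = as_nullable_spark_type(
#         force_decimal_precision_scale(sdf.schema, precision=38, scale=18))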
def _test() -> None:
import doctest
import sys
import pyspark.pandas.spark.utils
globs = pyspark.pandas.spark.utils.__dict__.copy()
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.spark.utils,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
HIPERFIT/futhark | tools/benchmark-performance-plot.py | 3 | 3759 | #!/usr/bin/env python
import numpy as np
import sys
import json
import re
import subprocess
import datetime
import matplotlib
matplotlib.use('Agg') # For headless use
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import os
_, results_dir, machine, benchmark, plotfile = sys.argv
def compute_commit_timestamps():
log = subprocess.check_output(['git', 'log', '--pretty=format:%H %at'])
result = {}
for line in log.split('\n'):
commit,timestamp = line.split(' ')
result[commit] = int(timestamp)
return result
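# Sketch of the data being parsed (hypothetical values, for illustration only):
# `git log --pretty=format:'%H %at'` prints one "<full-sha> <unix-timestamp>"
# pair per line, e.g.
#     1b2c3d4e... 1467290113
#     9f8e7d6c... 1467201154
# which compute_commit_timestamps() turns into
#     {'1b2c3d4e...': 1467290113, '9f8e7d6c...': 1467201154}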
def remove_nones(l):
return filter(lambda x: x is not None, l)
commit_timestamps = compute_commit_timestamps()
def cut_desc(s):
if s[0] == '#':
return s.split(' ')[0]
else:
return s
def extract_result(filename):
match = re.search('^{}-([a-f0-9]+).json$'.format(machine), filename)
if match:
commit=match.group(1)
results = json.load(open(results_dir + '/' + filename))
try:
benchmark_results = results['futhark-benchmarks/'+benchmark]
def get_runtime(r):
for dataset in r:
if type(r[dataset]) is dict:
return np.mean(r[dataset]['runtimes'])
runtimes={}
for dataset in benchmark_results['datasets']:
if type(benchmark_results['datasets'][dataset]) is dict:
runtimes[cut_desc(dataset)] = np.mean(benchmark_results['datasets'][dataset]['runtimes'])
return {'timestamp': commit_timestamps[commit],
'commit': commit,
'runtimes': runtimes}
except:
return None
results = remove_nones(map (extract_result,
os.listdir(results_dir)))
results.sort(key=lambda x: x['timestamp'])
if len(results) == 0:
sys.exit('No results found for benchmark {}.'.format(benchmark))
best = {}
def from_unixtime(timestamp):
return datetime.datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')
for r in results:
time = from_unixtime(r['timestamp'])
for dataset in r['runtimes']:
if dataset not in best or r['runtimes'][dataset] < best[dataset]['runtime']:
best[dataset] = {'runtime': r['runtimes'][dataset],
'timestamp': r['timestamp'],
'commit': r['commit'] }
print r['commit'], dataset, time, r['runtimes'][dataset]
for dataset in sorted(best.keys()):
best_time = from_unixtime(best[dataset]['timestamp'])
print 'Dataset {} best: {} {} {}'.format(dataset, best_time, best[dataset]['commit'], best[dataset]['runtime'])
def make_xticks(results):
times = np.array(map(lambda x: from_unixtime(x['timestamp']), results))
return times[np.arange(0,len(times),len(times)/10)]
fig, ax = plt.subplots()
ax.set_title(benchmark)
ax.set_ylabel('Slowdown compared to fastest')
for dataset in sorted(best.keys()):
best_runtime=best[dataset]['runtime']
xs=[]
ys=[]
i = 0
for r in results:
if dataset in r['runtimes']:
xs += [i]
ys += [r['runtimes'][dataset]/best_runtime]
i += 1
ax.plot(xs, ys, label=dataset)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
grey='#aaaaaa'
xticks=make_xticks(results)
ax.set_yscale('log')
ax.set_ylim(ymin=0.9,ymax=3)
ax.yaxis.grid(color=grey,zorder=0)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.yaxis.set_minor_formatter(FormatStrFormatter(''))
ax.set_yticks(np.arange(0.9,3,0.1))
ax.set_xticks(1+np.arange(len(xticks))*(len(results)/10))
ax.set_xticklabels(xticks, rotation=-45)
plt.rc('text')
plt.savefig(plotfile, bbox_inches='tight')
| isc |
drpeteb/scipy | scipy/stats/_multivariate.py | 35 | 69253 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln
from scipy._lib._util import check_random_state
__all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
def _process_parameters(dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
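# Illustrative example (not part of the original module): with the default
# eps=1e-5, entries whose magnitude is at or below eps are mapped to zero
# instead of being inverted, e.g.
#     _pinv_1d(np.array([4.0, 1e-12, -2.0]))  ->  array([ 0.25,  0.  , -0.5 ])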
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
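# Illustrative example (not part of the original module): for a singular but
# positive semidefinite covariance, the coordinated quantities computed by
# _PSD agree with each other:
#     psd = _PSD(np.array([[1., 0.], [0., 0.]]))
#     psd.rank       # -> 1
#     psd.log_pdet   # -> 0.0  (log of the pseudo-determinant, log(1))
#     psd.pinv       # -> array([[ 1.,  0.], [ 0.,  0.]])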
_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
_doc_frozen_callparams = ""
_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
docdict_params = {
'_doc_default_callparams': _doc_default_callparams,
'_doc_callparams_note': _doc_callparams_note,
'_doc_random_state': _doc_random_state
}
docdict_noparams = {
'_doc_default_callparams': _doc_frozen_callparams,
'_doc_callparams_note': _doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
def logpdf(self, x, mean, cov, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean, cov, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
x = _process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, mean, cov = _process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self.dim, self.mean, self.cov = _process_parameters(None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
self._dist = multivariate_normal_gen(seed)
def logpdf(self, x):
x = _process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
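# Worked check (illustration only, not part of the original module): for a
# full-rank k-dimensional Gaussian the entropy above reduces to the usual
# closed form 0.5 * log((2*pi*e)**k * det(Sigma))
#            = 0.5 * (k * (log(2*pi) + 1) + log(det(Sigma))),
# e.g. multivariate_normal(mean=[0., 0.], cov=np.eye(2)).entropy()
#      ~= 0.5 * 2 * (np.log(2 * np.pi) + 1) ~= 2.8379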
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
        raise ValueError("Each entry in 'x' must be smaller than or equal to one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
        raise ValueError("The input vector 'x' must lie within the normal "
                         "simplex, but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
        Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution has only
support on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : scalar
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : scalar
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
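# Illustrative sketch of the interface note above (not part of the original
# module): rvs() returns samples of shape (size, K) while pdf()/logpdf() expect
# the K components along the first axis, so a round trip needs a transpose:
#     alpha = np.array([1.0, 2.0, 3.0])
#     samples = dirichlet.rvs(alpha, size=5)         # shape (5, 3)
#     densities = dirichlet.pdf(samples.T, alpha)    # shape (5,)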
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
            raise ValueError("Array 'scale' must be square if it is two"
                             " dimensional, but scale.shape = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
            raise ValueError("Quantiles must be at most two-dimensional with"
                             " an additional dimension for multiple"
                             " components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components alone the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
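# Hedged sanity check (illustration only, not part of the original module): the
# sample mean of Wishart draws approaches df * scale, matching wishart.mean():
#     S = wishart.rvs(df=5, scale=np.eye(2), size=100000, random_state=0)
#     S.mean(axis=0)                         # ~ 5 * np.eye(2)
#     wishart.mean(df=5, scale=np.eye(2))    # exactly array([[ 5.,  0.], [ 0.,  5.]])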
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
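# Illustrative check (not part of the original module): _cho_inv_batch inverts
# each matrix stored in the trailing two dimensions, so for a stack of
# symmetric positive definite matrices it agrees with np.linalg.inv:
#     a = np.array([[[2., 0.], [0., 4.]],
#                   [[3., 1.], [1., 2.]]])
#     _cho_inv_batch(a.copy())   # ~ np.array([np.linalg.inv(m) for m in a])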
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
    Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
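            # With x = C C^T and C lower triangular, det(x) = prod(diag(C))**2,
            # so log det(x) = 2 * sum(log(diag(C))).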
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
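        When defined, the mean equals ``scale / (df - dim - 1)``, where ``dim``
        is the dimension of the scale matrix (this mirrors ``_mean`` above).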
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
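        A minimal usage sketch (the parameter values below are arbitrary and
        only the output shape is checked):
        >>> import numpy as np
        >>> from scipy.stats import invwishart
        >>> invwishart.rvs(df=4, scale=np.eye(2), size=3).shape
        (3, 2, 2)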
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
| bsd-3-clause |
lukw00/shogun | examples/undocumented/python_modular/graphical/metric_lmnn_objective.py | 26 | 2350 | #!/usr/bin/env python
def load_compressed_features(fname_features):
try:
import gzip
import numpy
except ImportError:
print 'Error importing gzip and/or numpy modules. Please, verify their installation.'
import sys
sys.exit(0)
# load features from a gz compressed file
file_features = gzip.GzipFile(fname_features)
str_features = file_features.read()
file_features.close()
strlist_features = str_features.split('\n')[:-1] # all but last because the last line also has \n
# the number of lines in the file is the number of vectors
num_vectors = len(strlist_features)
# the number of elements in a line is the number of features
num_features = len(strlist_features[0].split())
# memory pre-allocation for the feature matrix
fm = numpy.zeros((num_vectors, num_features))
# fill in feature matrix
for i in xrange(num_vectors):
try:
fm[i,:] = map(numpy.float64, strlist_features[i].split())
		except ValueError:
print 'All the vectors must have the same number of features.'
import sys
sys.exit(0)
return fm
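# Sketch of the expected (uncompressed) file layout, as assumed by the parser
# above: one whitespace-separated feature vector per line, e.g.
#   0.1 0.2 0.3
#   0.4 0.5 0.6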
def metric_lmnn_statistics(k=3, fname_features='../../data/fm_train_multiclass_digits.dat.gz', fname_labels='../../data/label_train_multiclass_digits.dat'):
try:
from modshogun import LMNN, CSVFile, RealFeatures, MulticlassLabels, MSG_DEBUG
import matplotlib.pyplot as pyplot
except ImportError:
print 'Error importing modshogun or other required modules. Please, verify their installation.'
return
features = RealFeatures(load_compressed_features(fname_features).T)
labels = MulticlassLabels(CSVFile(fname_labels))
# print 'number of examples = %d' % features.get_num_vectors()
# print 'number of features = %d' % features.get_num_features()
assert(features.get_num_vectors() == labels.get_num_labels())
# train LMNN
lmnn = LMNN(features, labels, k)
lmnn.set_correction(100)
# lmnn.io.set_loglevel(MSG_DEBUG)
print 'Training LMNN, this will take about two minutes...'
lmnn.train()
print 'Training done!'
# plot objective obtained during training
statistics = lmnn.get_statistics()
pyplot.plot(statistics.obj.get())
pyplot.grid(True)
pyplot.xlabel('Iterations')
pyplot.ylabel('LMNN objective')
pyplot.title('LMNN objective during training for the multiclass digits data set')
pyplot.show()
if __name__=='__main__':
print('LMNN objective')
metric_lmnn_statistics()
| gpl-3.0 |
EmergentSystemLabStudent/NPB_DAA | sample/summary_summary_without_ARI.py | 1 | 1722 | #%%
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
import re
#%%
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--result_dir", type=Path, required=True)
args = parser.parse_args()
#%%
dirs = [dir for dir in args.result_dir.iterdir() if dir.is_dir() and re.match(r"^[0-9]+$", dir.stem)]
dirs.sort(key=lambda dir: dir.stem)
#%%
Path("figures").mkdir(exist_ok=True)
Path("summary_files").mkdir(exist_ok=True)
#%%
print("Initialize variables....")
N = len(dirs)
tmp = np.loadtxt(dirs[0] / "summary_files/resample_times.txt")
T = tmp.shape[0]
resample_times = np.empty((N, T))
log_likelihoods = np.empty((N, T+1))
print("Done!")
#%%
print("Loading results....")
for i, dir in enumerate(dirs):
resample_times[i] = np.loadtxt(dir / "summary_files/resample_times.txt")
log_likelihoods[i] = np.loadtxt(dir / "summary_files/log_likelihood.txt")
print("Done!")
#%%
print("Ploting...")
plt.clf()
plt.errorbar(range(T), resample_times.mean(axis=0), yerr=resample_times.std(axis=0))
plt.xlabel("Iteration")
plt.ylabel("Execution time [sec]")
plt.title("Transitions of the execution time")
plt.savefig("figures/summary_of_execution_time.png")
plt.clf()
plt.errorbar(range(T+1), log_likelihoods.mean(axis=0), yerr=log_likelihoods.std(axis=0))
plt.xlabel("Iteration")
plt.ylabel("Log likelihood")
plt.title("Transitions of the log likelihood")
plt.savefig("figures/summary_of_log_likelihood.png")
print("Done!")
#%%
print("Save npy files...")
np.save("summary_files/resample_times.npy", resample_times)
np.save("summary_files/log_likelihoods.npy", log_likelihoods)
print("Done!")
| mit |
percyfal/bokeh | bokeh/sampledata/periodic_table.py | 15 | 1575 | '''
This module provides the periodic table as a data set. It exposes an attribute 'elements'
which is a pandas dataframe with the following fields
elements['atomic Number']
elements['symbol']
elements['name']
elements['atomic mass'] (units: amu)
elements['CPK'] (convention for molecular modeling color)
elements['electronic configuration']
elements['electronegativity'] (units: Pauling)
elements['atomic radius'] (units: pm)
elements['ionic radius'] (units: pm)
elements['van der waals radius'] (units: pm)
elements['ionization energy'] (units: kJ/mol)
elements['electron affinity'] (units: kJ/mol)
elements['phase'] (standard state: solid, liquid, gas)
elements['bonding type']
elements['melting point'] (units: K)
elements['boiling point'] (units: K)
elements['density'] (units: g/cm^3)
elements['type'] (see below)
elements['year discovered']
elements['group']
elements['period']
element types: actinoid, alkali metal, alkaline earth metal, halogen, lanthanoid, metal, metalloid, noble gas, nonmetal, transition metal
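Example (illustrative only; column names follow the field list above):
    from bokeh.sampledata.periodic_table import elements
    halogens = elements[elements['type'] == 'halogen']
    print(halogens[['symbol', 'name', 'atomic mass']])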
'''
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'periodic_table sample data requires Pandas (http://pandas.pydata.org) to be installed')
from os.path import dirname, join
elements = pd.read_csv(join(dirname(__file__), 'elements.csv'))
| bsd-3-clause |
johnson1228/pymatgen | pymatgen/analysis/graphs.py | 4 | 39584 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import warnings
import subprocess
import numpy as np
import os.path
from pymatgen.core import Structure, Lattice, PeriodicSite, Molecule
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
from monty.json import MSONable
from monty.os.path import which
from operator import itemgetter
from collections import namedtuple
from scipy.spatial import KDTree
try:
import networkx as nx
from networkx.readwrite import json_graph
from networkx.drawing.nx_agraph import write_dot
except ImportError:
raise ImportError("This module requires the NetworkX "
"graph library to be installed.")
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
__author__ = "Matthew Horton"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "August 2017"
ConnectedSite = namedtuple('ConnectedSite', 'periodic_site, jimage, index, weight, dist')
class StructureGraph(MSONable):
def __init__(self, structure, graph_data):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given crystallographic
structure easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
For periodic graphs, class stores information on the graph
edges of what lattice image the edge belongs to.
:param *args: same as in :class: `pymatgen.core.Structure`
:param graph_data: dict containing graph information in
dict format, not intended to be constructed manually
"""
self.structure = structure
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if 'id' in d:
del d['id']
if 'key' in d:
del d['key']
@classmethod
def with_empty_graph(cls, structure, name="bonds",
edge_weight_name=None,
edge_weight_units=None):
"""
Constructor for StructureGraph, returns a StructureGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Structure).
:param structure (Structure):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (StructureGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError("Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless.")
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name)
graph.add_nodes_from(range(len(structure)))
graph_data = json_graph.adjacency_data(graph)
return cls(structure, graph_data=graph_data)
@staticmethod
def with_local_env_strategy(structure, strategy):
"""
Constructor for StructureGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param structure: Structure object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors`
object
:return:
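        Illustrative call (the strategy class named here is just an example,
        not a requirement of this method)::
            from pymatgen.analysis.local_env import MinimumDistanceNN
            sg = StructureGraph.with_local_env_strategy(structure,
                                                        MinimumDistanceNN())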
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds",
edge_weight_name="weight",
edge_weight_units="")
for n in range(len(structure)):
neighbors = strategy.get_nn_info(structure, n)
for neighbor in neighbors:
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another form site v to site u: this is
# harmless, so warn_duplicates=False
sg.add_edge(from_index=n,
from_jimage=(0, 0, 0),
to_index=neighbor['site_index'],
to_jimage=neighbor['image'],
weight=neighbor['weight'],
warn_duplicates=False)
return sg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph['name']
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph['edge_weight_name']
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph['edge_weight_units']
def add_edge(self, from_index, to_index,
from_jimage=(0, 0, 0), to_jimage=None,
weight=None, warn_duplicates=True):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
However, images will always always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param from_jimage (tuple of ints): lattice vector of periodic
image, e.g. (1, 0, 0) for periodic image in +x direction
:param to_jimage (tuple of ints): lattice vector of image
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:return:
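        For example (indices are illustrative), calling
        add_edge(from_index=3, to_index=1, to_jimage=(1, 0, 0)) stores an edge
        from site 1 to site 3 with to_jimage=(-1, 0, 0) after normalization.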
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
to_jimage, from_jimage = from_jimage, to_jimage
# constrain all from_jimages to be (0, 0, 0),
# initial version of this class worked even if
# from_jimage != (0, 0, 0), but making this
# assumption simplifies logic later
if not np.array_equal(from_jimage, (0, 0, 0)):
shift = from_jimage
from_jimage = np.subtract(from_jimage, shift)
to_jimage = np.subtract(to_jimage, shift)
# automatic detection of to_jimage if user doesn't specify
# will try and detect all equivalent images and add multiple
# edges if appropriate
if to_jimage is None:
# assume we want the closest site
warnings.warn("Please specify to_jimage to be unambiguous, "
"trying to automatically detect.")
dist, to_jimage = self.structure[from_index].distance_and_image(self.structure[to_index])
if dist == 0:
# this will happen when from_index == to_index,
# typically in primitive single-atom lattices
images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
dists = []
for image in images:
dists.append(self.structure[from_index].distance_and_image(self.structure[from_index],
jimage=image)[0])
dist = min(dists)
equiv_sites = self.structure.get_neighbors_in_shell(self.structure[from_index].coords,
dist,
dist*0.01,
include_index=True)
for site, dist, to_index in equiv_sites:
to_jimage = np.subtract(site.frac_coords, self.structure[from_index].frac_coords)
to_jimage = to_jimage.astype(int)
self.add_edge(from_index=from_index, from_jimage=(0, 0, 0),
to_jimage=to_jimage, to_index=to_index)
return
from_jimage, to_jimage = tuple(from_jimage), tuple(to_jimage)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between a given (site, jimage) pair and another
# (site, jimage) pair
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data:
for key, d in existing_edge_data.items():
if d["to_jimage"] == to_jimage:
if warn_duplicates:
warnings.warn("Trying to add an edge that already exists from "
"site {} to site {} in {}.".format(from_index,
to_index,
to_jimage))
return
if weight:
self.graph.add_edge(from_index, to_index,
from_jimage=from_jimage, to_jimage=to_jimage,
weight=weight)
else:
self.graph.add_edge(from_index, to_index,
from_jimage=from_jimage, to_jimage=to_jimage)
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = [(u, v, d, 'out') for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, 'in') for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d['to_jimage']
if dir == 'in':
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
site_d = self.structure[v].as_dict()
site_d['abc'] = np.add(site_d['abc'], to_jimage).tolist()
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
periodic_site = PeriodicSite.from_dict(site_d)
weight = d.get('weight', None)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
connected_site = ConnectedSite(periodic_site=periodic_site,
jimage=to_jimage,
index=v,
weight=weight,
dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
return self.graph.degree(n)
def draw_graph_to_file(self, filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp"):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires "
"GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
# mutli-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
            # magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587
+ c[2] * 0.114) / 255 < 0.5 else '#ffffff'
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,
fontname="Helvetica-bold", style="filled", shape="circle")
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d['to_jimage']
# set edge style
d['style'] = "solid"
if to_image != (0, 0, 0):
d['style'] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d['arrowhead'] = "none"
# only add labels for images that are not the origin
if image_labels:
d['headlabel'] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d['arrowhead'] = "normal" if d['headlabel'] else "none"
# optionally color edges using node colors
color_u = g.node[u]['fillcolor']
color_v = g.node[v]['fillcolor']
d['color_uv'] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get('edge_weight_units', "")
if d.get('weight'):
d['label'] = "{:.2f} {}".format(d['weight'], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d['to_jimage']) in diff['self']:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d['to_jimage']) in diff['other']:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({'color_uv': '#00ff00'})
for u, v, k in red_edges:
g.edges[u, v, k].update({'color_uv': '#ff0000'})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename+".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename+".dot"]
rs = subprocess.Popen(args,
stdout=f,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename+".dot")
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph)}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Structure` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
s = Structure.from_dict(d['structure'])
return cls(s, d['graphs'])
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
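        Usage sketch (the scaling is illustrative): ``sg * (2, 2, 2)`` returns
        a new StructureGraph for the 2x2x2 supercell; ``(2, 2, 2) * sg`` works
        equivalently via __rmul__.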
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
# computationally expensive than just keeping track of the
# which new lattice images present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError('Not tested with 3x3 scaling matrices yet.')
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(site.species_and_occu, site.coords + v,
new_lattice, properties=site.properties,
coords_are_cartesian=True, to_unit_cell=False)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True)
if d['to_jimage'] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in Å for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d['to_jimage'] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
# new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d['to_jimage'] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
# will remove two edges for everyone one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image%1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d['to_jimage']).astype(int))
new_d['to_jimage'] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove),
len(edges_to_add)))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            new_g.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g)}
sg = StructureGraph.from_dict(d)
return sg
def __rmul__(self, other):
return self.__mul__(other)
def _edges_to_string(self, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-"*max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0,1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.12E}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))),
data.get("weight", 0))
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v,
str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx:self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d['to_jimage'] = tuple(np.multiply(-1, d['to_jimage']).astype(int))
edges_to_remove.append((u,v,k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return StructureGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d['to_jimage'])
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d['to_jimage'])
for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and \
(self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Specie strings, will not count
number of occurrences of bonds
:return:
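        Worked example for 'dist' (edge sets are illustrative): if self has
        edges {a, b, c} and other has {b, c, d}, the intersection has 2 edges
        and the union has 4, so dist = 1 - 2/4 = 0.5.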
"""
if self.structure != other.structure and strict:
            raise ValueError("Meaningless to compare StructureGraphs if "
                             "corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d['to_jimage'])
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d['to_jimage'])
for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {(str(self.structure[u].specie),
str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(str(other.structure[u].specie),
str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
'self': edges - edges_other,
'other': edges_other - edges,
'both': edges.intersection(edges_other),
'dist': jaccard_dist
}
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
if getattr(self, '_supercell_sg', None) is None:
self._supercell_sg = supercell_sg = self*(3,3,3)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))
# discount subgraphs that lie across *supercell* boundaries
        # these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
for u, v, d in subgraph.edges(data=True)])
if not intersects_boundary:
molecule_subgraphs.append(subgraph)
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1['specie'] == n2['specie']
def edge_match(e1, e2):
if use_weights:
return e1['weight'] == e2['weight']
else:
return True
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [nx.is_isomorphic(subgraph, g,
node_match=node_match,
edge_match=edge_match)
for g in unique_subgraphs]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n
in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n
in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
| mit |
mmottahedi/neuralnilm_prototype | scripts/e381.py | 2 | 6330 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=128,
subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=True,
target_is_prediction=False,
independently_center_inputs=True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-1,
learning_rate_changes_by_iteration={
100: 1e-2,
1000: 1e-3
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': tanh,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BLSTMLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
}
]
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
])
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
vibhorag/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
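# The boundary satisfies w[0]*x + w[1]*y + intercept = 0, i.e.
# y = -(w[0]/w[1])*x - intercept/w[1]; `a` and `yy` below compute exactly that.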
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
themrmax/scikit-learn | sklearn/tests/test_calibration.py | 64 | 12999 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from scipy import sparse
from sklearn.model_selection import LeaveOneOut
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
ignore_warnings)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@ignore_warnings
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# a different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
        # log-loss of naively converting the OvR decision function to
        # probabilities via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
def test_calibration_prob_sum():
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0)
clf_prob = CalibratedClassifierCV(clf, method="sigmoid", cv=LeaveOneOut())
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
def test_calibration_less_classes():
    # Test to check that calibration works fine when the train set in a
    # train-test split does not contain all classes.
    # Since this test uses LOO, at each iteration the train set will be
    # missing one class label.
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0)
cal_clf = CalibratedClassifierCV(clf, method="sigmoid", cv=LeaveOneOut())
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
assert_array_equal(proba[:, i], np.zeros(len(y)))
assert_equal(np.all(np.hstack([proba[:, :i],
proba[:, i + 1:]])), True)
| bsd-3-clause |
gef756/statsmodels | statsmodels/nonparametric/kde.py | 23 | 18710 | """
Univariate Kernel Density Estimators
References
----------
Racine, Jeff. (2008) "Nonparametric Econometrics: A Primer," Foundation and
Trends in Econometrics: Vol 3: No 1, pp1-88.
http://dx.doi.org/10.1561/0800000009
http://en.wikipedia.org/wiki/Kernel_%28statistics%29
Silverman, B.W. Density Estimation for Statistics and Data Analysis.
"""
from __future__ import absolute_import, print_function, division
from statsmodels.compat.python import range
# for 2to3 with extensions
import warnings
import numpy as np
from scipy import integrate, stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
from . import bandwidths
from .kdetools import (forrt, revrt, silverman_transform, counts)
from .linbin import fast_linbin
#### Kernels Switch for estimators ####
kernel_switch = dict(gau=kernels.Gaussian, epa=kernels.Epanechnikov,
uni=kernels.Uniform, tri=kernels.Triangular,
biw=kernels.Biweight, triw=kernels.Triweight,
cos=kernels.Cosine, cos2=kernels.Cosine2)
def _checkisfit(self):
try:
self.density
except:
raise ValueError("Call fit to fit the density first")
#### Kernel Density Estimator Class ###
class KDEUnivariate(object):
"""
Univariate Kernel Density Estimator.
Parameters
----------
endog : array-like
The variable for which the density estimate is desired.
Notes
-----
If cdf, sf, cumhazard, or entropy are computed, they are computed based on
the definition of the kernel rather than the FFT approximation, even if
the density is fit with FFT = True.
`KDEUnivariate` is much faster than `KDEMultivariate`, due to its FFT-based
implementation. It should be preferred for univariate, continuous data.
`KDEMultivariate` also supports mixed data.
See Also
--------
KDEMultivariate
kdensity, kdensityfft
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> nobs = 300
>>> np.random.seed(1234) # Seed random generator
>>> dens = sm.nonparametric.KDEUnivariate(np.random.normal(size=nobs))
>>> dens.fit()
>>> plt.plot(dens.cdf)
>>> plt.show()
"""
def __init__(self, endog):
self.endog = np.asarray(endog)
def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None,
gridsize=None, adjust=1, cut=3, clip=(-np.inf, np.inf)):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, it is the bandwidth.
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of X so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
"""
try:
bw = float(bw)
self.bw_method = "user-given"
except:
self.bw_method = bw
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
else:
density, grid, bw = kdensity(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
# should this passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = resettable_cache()
@cache_readonly
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
density = self.density
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a,b = -np.inf,np.inf
else:
a,b = kern.domain
func = lambda x,s: kern.density(s,x)
support = self.support
support = np.r_[a,support]
gridsize = len(support)
endog = self.endog
probs = [integrate.quad(func, support[i-1], support[i],
args=endog)[0] for i in range(1,gridsize)]
return np.cumsum(probs)
@cache_readonly
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf)
@cache_readonly
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf
@cache_readonly
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
"""
_checkisfit(self)
def entr(x,s):
pdf = kern.density(s,x)
return pdf*np.log(pdf+1e-12)
pdf = self.density
kern = self.kernel
if kern.domain is not None:
            a,b = kern.domain
else:
a,b = -np.inf,np.inf
endog = self.endog
#TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a,b, args=(endog,))[0]
@cache_readonly
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0,1,
gridsize))
def evaluate(self, point):
"""
Evaluate density at a single point.
Parameters
----------
point : float
Point at which to evaluate the density.
"""
_checkisfit(self)
return self.kernel.density(self.endog, point)
#### Kernel Density Estimator Functions ####
def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version.
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:,None]
clip_x = np.logical_and(X>clip[0], X<clip[1])
X = X[clip_x]
nobs = len(X) # after trim
    if gridsize is None:
gridsize = max(nobs,50) # don't need to resize if no FFT
# handle weights
if weights is None:
weights = np.ones(nobs)
q = nobs
else:
# ensure weights is a numpy array
weights = np.asarray(weights)
if len(weights) != len(clip_x):
msg = "The length of the weights must be the same as the given X."
raise ValueError(msg)
weights = weights[clip_x.squeeze()]
q = weights.sum()
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
# if bw is None, select optimal bandwidth for kernel
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern)
bw *= adjust
a = np.min(X,axis=0) - cut*bw
b = np.max(X,axis=0) + cut*bw
grid = np.linspace(a, b, gridsize)
k = (X.T - grid[:,None])/bw # uses broadcasting to make a gridsize x nobs
# set kernel bandwidth
kern.seth(bw)
# truncate to domain
if kern.domain is not None: # won't work for piecewise kernels like parzen
z_lo, z_high = kern.domain
domain_mask = (k < z_lo) | (k > z_high)
k = kern(k) # estimate density
k[domain_mask] = 0
else:
k = kern(k) # estimate density
k[k<0] = 0 # get rid of any negative values, do we need this?
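    # Rosenblatt-Parzen estimate: weighted sum of the kernel evaluations at
    # each grid point, normalised by the total weight and the bandwidth.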
dens = np.dot(k,weights)/(q*bw)
if retgrid:
return dens, grid, bw
else:
return dens, bw
def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
        If gridsize is None, max(len(X), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{X.min() or X.max()}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights aren't implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
---------- ::
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
    AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
    Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
    the Fast Fourier Transform`. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9.
"""
X = np.asarray(X)
X = X[np.logical_and(X>clip[0], X<clip[1])] # won't work for two columns.
# will affect underlying data?
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern?
bw *= adjust
nobs = len(X) # after trim
# 1 Make grid and discretize the data
    if gridsize is None:
gridsize = np.max((nobs,512.))
gridsize = 2**np.ceil(np.log2(gridsize)) # round to next power of 2
a = np.min(X)-cut*bw
b = np.max(X)+cut*bw
grid,delta = np.linspace(a,b,gridsize,retstep=True)
RANGE = b-a
#TODO: Fix this?
# This is the Silverman binning function, but I believe it's buggy (SS)
# weighting according to Silverman
# count = counts(X,grid)
# binned = np.zeros_like(grid) #xi_{k} in Silverman
# j = 0
# for k in range(int(gridsize-1)):
# if count[k]>0: # there are points of X in the grid here
# Xingrid = X[j:j+count[k]] # get all these points
# # get weights at grid[k],grid[k+1]
# binned[k] += np.sum(grid[k+1]-Xingrid)
# binned[k+1] += np.sum(Xingrid-grid[k])
# j += count[k]
# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta
#NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
binned = fast_linbin(X,a,b,gridsize)/(delta*nobs)
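    # fast_linbin spreads each observation linearly over its two neighbouring
    # grid points; dividing by delta*nobs makes the binned weights integrate
    # to one over the grid.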
# step 2 compute FFT of the weights, using Munro (1976) FFT convention
y = forrt(binned)
# step 3 and 4 for optimal bw compute zstar and the density estimate f
# don't have to redo the above if just changing bw, ie., for cross val
#NOTE: silverman_transform is the closed form solution of the FFT of the
#gaussian kernel. Not yet sure how to generalize it.
zstar = silverman_transform(bw, gridsize, RANGE)*y # 3.49 in Silverman
# 3.50 w Gaussian kernel
f = revrt(zstar)
if retgrid:
return f, grid, bw
else:
return f, bw
if __name__ == "__main__":
import numpy as np
np.random.seed(12345)
xi = np.random.randn(100)
f,grid, bw1 = kdensity(xi, kernel="gau", bw=.372735, retgrid=True)
f2, bw2 = kdensityfft(xi, kernel="gau", bw="silverman",retgrid=False)
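    # Added sanity check (not part of the original script): the class
    # interface should reproduce the FFT-based functional estimate above,
    # since KDEUnivariate.fit with fft=True simply wraps kdensityfft.
    kde_cls = KDEUnivariate(xi)
    kde_cls.fit(kernel="gau", bw="silverman", fft=True)
    np.testing.assert_almost_equal(kde_cls.density, f2, 7)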
# do some checking vs. silverman algo.
# you need denes.f, http://lib.stat.cmu.edu/apstat/176
#NOTE: I (SS) made some changes to the Fortran
# and the FFT stuff from Munro http://lib.stat.cmu.edu/apstat/97o
# then compile everything and link to denest with f2py
#Make pyf file as usual, then compile shared object
#f2py denest.f -m denest2 -h denest.pyf
#edit pyf
#-c flag makes it available to other programs, fPIC builds a shared library
#/usr/bin/gfortran -Wall -c -fPIC fft.f
#f2py -c denest.pyf ./fft.o denest.f
try:
from denest2 import denest # @UnresolvedImport
a = -3.4884382032045504
b = 4.3671504686785605
RANGE = b - a
bw = bandwidths.bw_silverman(xi)
ft,smooth,ifault,weights,smooth1 = denest(xi,a,b,bw,np.zeros(512),np.zeros(512),0,
np.zeros(512), np.zeros(512))
# We use a different binning algo, so only accurate up to 3 decimal places
np.testing.assert_almost_equal(f2, smooth, 3)
#NOTE: for debugging
# y2 = forrt(weights)
# RJ = np.arange(512/2+1)
# FAC1 = 2*(np.pi*bw/RANGE)**2
# RJFAC = RJ**2*FAC1
# BC = 1 - RJFAC/(6*(bw/((b-a)/M))**2)
# FAC = np.exp(-RJFAC)/BC
# SMOOTH = np.r_[FAC,FAC[1:-1]] * y2
# dens = revrt(SMOOTH)
except:
# ft = np.loadtxt('./ft_silver.csv')
# smooth = np.loadtxt('./smooth_silver.csv')
print("Didn't get the estimates from the Silverman algorithm")
| bsd-3-clause |
offirt/SMS-Broadcast | sms_broadcast.py | 1 | 2314 | import pandas as pd
import sys, getopt
from twilio.rest import Client
def main(argv):
file, nameColumn, phoneColumn, template, sendReal, twilioSid, twilioToken, twilioFrom = parseArgs(argv)
rows = pd.read_csv(file)
rows = rows[[nameColumn, phoneColumn]]
client = Client(twilioSid, twilioToken)
for index, row in rows.iterrows():
text = template.replace('<name>', row[nameColumn])
phone = '+{}'.format(row[phoneColumn])
sendSms(phone, text, sendReal, client, twilioFrom)
def sendSms(phone, text, sendReal, client, twilioFrom):
print('Sending SMS to {}. text: {}'.format(phone, text))
if sendReal:
message = client.messages.create(
to=phone,
from_=twilioFrom,
body=text)
print(message.sid)
def parseArgs(argv):
try:
opts, args = getopt.getopt(argv, "hsf:t:n:p:i:o:r:",
["send_real", "file=", "template=", "name_column=", "phone_column=", "twilio_sid", "twilio_token", "twilio_from"])
except getopt.GetoptError:
printHelp()
sys.exit(2)
file = ''
template = ''
nameColumn = 'Name'
phoneColumn = 'Phone number'
sendReal = False
twilioSid = ''
twilioToken = ''
twilioFrom = ''
for opt, arg in opts:
if opt == '-h':
printHelp()
sys.exit()
elif opt in ("-s", "--send_real"):
sendReal = True
elif opt in ("-f", "--file"):
file = arg
elif opt in ("-t", "--template"):
template = arg
elif opt in ("-n", "--name_column"):
nameColumn = arg
elif opt in ("-p", "--phone_column"):
phoneColumn = arg
elif opt in ("-i", "--twilio_sid"):
twilioSid = arg
elif opt in ("-o", "--twilio_token"):
twilioToken = arg
elif opt in ("-r", "--twilio_from"):
twilioFrom = arg
return file, nameColumn, phoneColumn, template, sendReal, twilioSid, twilioToken, twilioFrom
def printHelp():
print(
'sms_broadcast.py -s -f <csv_file> -t <text_template> -n <name_column_name> ' +
'-p <phone_number_column_name> -i <twilio_sid> -o <twilio_token> -r <twilio_from>')
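# Example invocation (illustrative values only -- the CSV path, message
# template and Twilio credentials below are placeholders):
#   python sms_broadcast.py -s -f contacts.csv \
#       -t "Hi <name>, the meeting starts at 10am" \
#       -n "Name" -p "Phone number" \
#       -i ACxxxxxxxx -o your_auth_token -r +15551234567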
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
100star/h2o | py/testdir_single_jvm/test_GLM2_score_same.py | 9 | 4631 |
## Dataset created from this:
#
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
import unittest, time, sys, copy
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_import as h2i
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs, pollTimeoutSecs, **kwargs):
print "\nStarting GLM of", csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=60, pollTimeoutSecs=pollTimeoutSecs)
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLM in", (time.time() - start), "secs (python)"
h2o_glm.simpleCheckGLM(self, glm, "C8", **kwargs)
# compare this glm to the first one. since the files are replications, the results
# should be similar?
glm_model = glm['glm_model']
validation = glm_model['submodels'][0]['validation']
modelKey = glm_model['_key']
return modelKey, validation, parseResult
def glm_score(self, csvFilename, bucket, csvPathname, modelKey, modelPathname, timeoutSecs=30, pollTimeoutSecs=30):
print "\nStarting GLM score of", csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=timeoutSecs, pollTimeoutSecs=pollTimeoutSecs)
y = "10"
# save and restore the model
h2o.nodes[0].save_model(model=modelKey, path=modelPathname, force=1)
# FIX! should we remove the existing key to make sure it loads? really should try both cases (existing or not)
h2o.nodes[0].load_model(path=modelPathname)
start = time.time()
glmScore = h2o_cmd.runScore(dataKey=parseResult['destination_key'], modelKey=modelKey,
vactual=y, vpredict=1, expectedAuc=0.5, doAUC=False)
print "GLMScore in", (time.time() - start), "secs (python)"
h2o.verboseprint(h2o.dump_json(glmScore))
    # compare this glm to the first one. since the files are replications,
    # the results should be similar?
# UPDATE: format for returning results is slightly different than normal GLM
if self.glmScore1:
h2o_glm.compareToFirstGlm(self, 'mse', glmScore, self.glmScore1)
else:
self.glmScore1 = copy.deepcopy(glmScore)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
glmScore1 = {}
def test_GLM2_score_same(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
bucket = 'home-0xdiag-datasets'
csvFilename = "1mx10_hastie_10_2.data.gz"
csvPathname = 'standard' + '/' + csvFilename
y = "10"
kwargs = {'response': y, 'alpha': 0, 'family': 'gaussian'}
(modelKey, validation1, parseResult) = glm_doit(self, csvFilename, bucket, csvPathname,
timeoutSecs=60, pollTimeoutSecs=60, **kwargs)
print "Use", modelKey, "model on 2x and 4x replications and compare results to 1x"
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
h2o_util.file_gunzip(fullPathname, pathname1x)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
modelPathname = SYNDATASETS_DIR + '/model_' + filename2x
bucket = None
h2o_util.file_cat(pathname1x,pathname1x,pathname2x)
glm_score(self,filename2x, bucket, pathname2x, modelKey, modelPathname, timeoutSecs=60, pollTimeoutSecs=60)
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
modelPathname = SYNDATASETS_DIR + '/model_' + filename4x
h2o_util.file_cat(pathname2x, pathname2x, pathname4x)
print "Iterating 3 times on this last one"
for i in range(3):
print "\nTrial #", i, "of", filename4x
glm_score(self,filename4x, bucket, pathname4x, modelKey, modelPathname, timeoutSecs=60, pollTimeoutSecs=60)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
bartvm/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image, the
    LocalDot has different filterbanks for different points in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause |
toobaz/pandas | pandas/io/formats/csvs.py | 2 | 11062 | """
Module for formatting output data into CSV files.
"""
import csv as csvlib
from io import StringIO
import os
import warnings
from zipfile import ZipFile
import numpy as np
from pandas._libs import writers as libwriters
from pandas.core.dtypes.generic import (
ABCDatetimeIndex,
ABCIndexClass,
ABCMultiIndex,
ABCPeriodIndex,
)
from pandas.core.dtypes.missing import notna
from pandas.io.common import (
UnicodeWriter,
_get_handle,
_infer_compression,
get_filepath_or_buffer,
)
class CSVFormatter:
def __init__(
self,
obj,
path_or_buf=None,
sep=",",
na_rep="",
float_format=None,
cols=None,
header=True,
index=True,
index_label=None,
mode="w",
encoding=None,
compression="infer",
quoting=None,
line_terminator="\n",
chunksize=None,
quotechar='"',
date_format=None,
doublequote=True,
escapechar=None,
decimal=".",
):
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf, _, _, _ = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression, mode=mode
)
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
if encoding is None:
encoding = "utf-8"
self.encoding = encoding
self.compression = _infer_compression(self.path_or_buf, compression)
if quoting is None:
quoting = csvlib.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csvlib.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator or os.linesep
self.date_format = date_format
self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex)
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError(
"cannot specify cols with a MultiIndex on the " "columns"
)
if cols is not None:
if isinstance(cols, ABCIndexClass):
cols = cols.to_native_types(
na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting,
)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, ABCIndexClass):
cols = cols.to_native_types(
na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting,
)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
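        # Default heuristic: aim for roughly 100,000 cells per chunk, with a
        # floor of one row per chunk (the inner ``or 1`` guards an empty
        # column list, the outer one a zero quotient).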
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if (
isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex))
and date_format is not None
):
from pandas import Index
self.data_index = Index(
[x.strftime(date_format) if notna(x) else "" for x in self.data_index]
)
self.nlevels = getattr(self.data_index, "nlevels", 1)
if not index:
self.nlevels = 0
def save(self):
"""
Create the writer & save
"""
# GH21227 internal compression is not used when file-like passed.
if self.compression and hasattr(self.path_or_buf, "write"):
msg = "compression has no effect when passing file-like " "object as input."
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# when zip compression is called.
is_zip = isinstance(self.path_or_buf, ZipFile) or (
not hasattr(self.path_or_buf, "write") and self.compression == "zip"
)
if is_zip:
# zipfile doesn't support writing string to archive. uses string
# buffer to receive csv writing and dump into zip compression
# file handle. GH21241, GH21118
f = StringIO()
close = False
elif hasattr(self.path_or_buf, "write"):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(
self.path_or_buf,
self.mode,
encoding=self.encoding,
compression=self.compression,
)
close = True
try:
writer_kwargs = dict(
lineterminator=self.line_terminator,
delimiter=self.sep,
quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
)
if self.encoding == "ascii":
self.writer = csvlib.writer(f, **writer_kwargs)
else:
writer_kwargs["encoding"] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
self._save()
finally:
if is_zip:
# GH17778 handles zip compression separately.
buf = f.getvalue()
if hasattr(self.path_or_buf, "write"):
self.path_or_buf.write(buf)
else:
f, handles = _get_handle(
self.path_or_buf,
self.mode,
encoding=self.encoding,
compression=self.compression,
)
f.write(buf)
close = True
if close:
f.close()
for _fh in handles:
_fh.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, ABCIndexClass))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(
(
"Writing {ncols} cols but got {nalias} "
"aliases".format(ncols=len(cols), nalias=len(header))
)
)
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, ABCMultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ""
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = [""]
else:
index_label = [index_label]
elif not isinstance(
index_label, (list, tuple, np.ndarray, ABCIndexClass)
):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns or has_aliases:
encoded_labels += list(write_cols)
writer.writerow(encoded_labels)
else:
# write out the mi
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([""] * (len(index_label) - 1))
col_line.extend(columns._get_level_values(i))
writer.writerow(col_line)
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
if encoded_labels and set(encoded_labels) != {""}:
encoded_labels.extend([""] * len(columns))
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(
slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting,
)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(
slicer=slicer,
na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting,
)
libwriters.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
| bsd-3-clause |
google-research/weakly_supervised_control | scripts/generate_hardware_datasets.py | 1 | 7837 | # Copyright 2020 The Weakly-Supervised Control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generates a weakly-labelled dataset of observations.
Double-click pixel to set position.
Press {'r', 'y', 'g', 'b', 't'} to label object position.
Press 'n' to go to next image, or '-' to go to previous image.
Press 'c' to copy previous labels to the current image.
Press 'z' to clear all labels for the current image.
Press 'd' to toggle do_not_use flag.
Press 's' to save.
Press 'q' to exit & save labelled data.
python -m scripts.generate_hardware_datasets --input-paths output/robot_observations-2020-02-28-17:07:51.npz,output/robot_observations-2020-02-28-18:41:34.npz
"""
import os
from typing import Any, Callable, Dict, List, Optional
from datetime import datetime
import cv2
import click
import gym
import matplotlib.pyplot as plt
import numpy as np
from weakly_supervised_control.envs import register_all_envs
from weakly_supervised_control.envs.env_util import get_camera_fn
from weakly_supervised_control.envs.hardware_robot import concat_images
from weakly_supervised_control.disentanglement.np_data import NpGroundTruthData
KEY_TO_LABEL = {
ord('r'): 'obj_pos_red',
ord('b'): 'obj_pos_blue',
ord('g'): 'obj_pos_green',
ord('y'): 'obj_pos_yellow',
ord('t'): 'obj_pos_purple',
}
LABEL_TO_COLOR = { # (b, g, r)
'obj_pos_red': (0, 0, 255),
'obj_pos_blue': (255, 0, 0),
'obj_pos_green': (0, 153, 0),
'obj_pos_yellow': (0, 255, 255),
'obj_pos_purple': (255, 0, 255),
}
def plot_sample(dset: NpGroundTruthData, save_path: str = None):
x1, x2, y = dset.sample_rank_pair(1)
x1 = concat_images(x1[0])
x2 = concat_images(x2[0])
fig, axes = plt.subplots(2, 1)
axes[0].imshow(x1)
axes[1].imshow(x2)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
else:
plt.show()
def get_mouse_position(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
param['position'] = (x, y)
print(param)
def save_dset(data, factors, factor_names, save_prefix: str = ""):
dset = NpGroundTruthData(
data, factors, factor_names=factor_names)
output_prefix = save_prefix + f'-n{len(data)}'
dset.save(output_prefix + '.npz')
plot_sample(dset, save_path=output_prefix + '.png')
def show_image(observations: Dict,
idx: int,
print_labels: bool = False,
width: int = 1000,
height: int = 1000):
if idx >= len(observations):
print(
f"Index {idx} greater than number of observations {len(observations)}")
return
if print_labels:
print(f'Image {idx} (out of {len(observations)}):')
print('do_not_use:', observations[idx].get('do_not_use', False))
images = observations[idx]['image_observation'].copy()
images = concat_images(images, resize_shape=(1000, 1000))
for label in KEY_TO_LABEL.values():
position = observations[idx].get(label, None)
if print_labels:
print(f"{label}: {position}")
color = LABEL_TO_COLOR[label]
cv2.circle(images, position, 20, color, -1)
cv2.imshow('image', images)
@click.command()
@click.option('--input-paths', type=str, help="Comma-separated list of paths")
@click.option('--test-data-size', type=int, default=256, help="Comma-separated list of paths")
def main(input_paths: str, test_data_size: int):
# Read observations.
input_paths = input_paths.split(',')
observations = []
for input_path in input_paths:
data = np.load(input_path, allow_pickle=True)
observations += data['observations'].tolist()
print(
f'Loaded {len(observations)} observations from {len(input_paths)} files.')
output_path = 'output/robot_observations-' + \
datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
param = {}
cv2.namedWindow('image')
cv2.setMouseCallback('image', get_mouse_position, param)
idx = 0
prev_idx = None
while idx < len(observations):
show_image(observations, idx)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'): # Exit
break
elif key == ord('s'): # Save
with open(output_path + '.npz', 'wb') as f:
np.savez(f, observations=observations)
print(f"Saved to {output_path}.npz")
elif key == ord('x'): # Delete current image
print(f"Deleting labels for image {idx}")
for label in KEY_TO_LABEL.values():
if label in observations[idx]:
del observations[idx][label]
show_image(observations, idx, print_labels=True)
elif key == ord('n'): # Next image
prev_idx = idx
idx += 1
show_image(observations, idx, print_labels=True)
elif key == ord('-'): # Previous image
prev_idx = idx
idx -= 1
show_image(observations, idx, print_labels=True)
elif key == ord('c'): # Copy previous labels
if prev_idx is None:
print(f"Cannot copy previous labels for idx {idx}")
continue
for label in KEY_TO_LABEL.values():
if label in observations[prev_idx]:
observations[idx][label] = observations[prev_idx][label]
show_image(observations, idx, print_labels=True)
elif key == ord('d'): # Do not use
observations[idx]['do_not_use'] = not observations[idx].get(
'do_not_use', False)
show_image(observations, idx, print_labels=True)
elif key in KEY_TO_LABEL.keys(): # Label image
label = KEY_TO_LABEL[key]
observations[idx][label] = param['position']
show_image(observations, idx, print_labels=True)
# Save labelled data.
with open(output_path + '.npz', 'wb') as f:
np.savez(f, observations=observations)
print(f"Saved to {output_path}.npz")
# Create dataset.
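    # Each factor vector is the 3-D end-effector position followed by the
    # labelled (x, y) pixel position of each coloured object, matching the
    # factor_names constructed below.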
data = []
factors = []
for i, o in enumerate(observations):
# Skip images with occlusion.
if o.get('do_not_use', False):
continue
y = o['end_effector_pos'].tolist()
for label in KEY_TO_LABEL.values():
if label in o:
pos = o[label]
else:
# Skip images with incomplete labels
print(f"Skipping image {i}: {o}")
y = None
break
y += pos
if y is not None:
obs = o['image_observation'] / 255.0
data.append(obs)
factors.append(y)
data = np.array(data)
factors = np.array(factors)
factor_names = ['hand_pos_x', 'hand_pos_y', 'hand_pos_z']
for label in KEY_TO_LABEL.values():
factor_names += [label + '_x', label + '_y']
# Split into train/test sets.
indices = np.random.permutation(len(data))
test_indices = indices[:test_data_size]
train_indices = indices[test_data_size:]
save_dset(data[train_indices], factors[train_indices],
factor_names, save_prefix=output_path)
save_dset(data[test_indices], factors[test_indices],
factor_names, save_prefix=output_path)
if __name__ == '__main__':
main()
| apache-2.0 |
andaag/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
eljost/pysisyphus | tests_staging/test_networkx/test_networkx.py | 1 | 1765 | #!/usr/bin/env python3
from pprint import pprint
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms import isomorphism
from pysisyphus.intcoords.setup import get_bond_mat
from pysisyphus.helpers import geom_from_xyz_file
def get_labels(geom):
return {i: f"{i:02d}_{atom}" for i, atom in enumerate(geom.atoms)}
def run():
fn = "07_ref_rx_phosphine_def2tzvp_reopt.xyz"
geom = geom_from_xyz_file(fn)
bm = get_bond_mat(geom)
print(bm)
node_attrs = {i: {"atom": atom} for i, atom in enumerate(geom.atoms)}
g = nx.from_numpy_array(bm)
nx.set_node_attributes(g, node_attrs)
# import pdb; pdb.set_trace()
# fig, ax = plt.subplots()
# draw_kwargs = {
# "ax": ax,
# "with_labels": True,
# "node_size": 250,
# }
# nx.draw(g, labels=get_labels(geom), **draw_kwargs)
# plt.show()
prod_fn = "01_ref_rx_product_opt.xyz"
prod = geom_from_xyz_file(prod_fn)
pbm = get_bond_mat(prod)
gp = nx.from_numpy_array(pbm)
pnode_attrs = {i: {"atom": atom} for i, atom in enumerate(prod.atoms)}
nx.set_node_attributes(gp, pnode_attrs)
# fig, ax = plt.subplots()
# draw_kwargs["ax"] = ax
# nx.draw(gp, labels=get_labels(prod), **draw_kwargs)
# plt.show()
gm = isomorphism.GraphMatcher(gp, g)
si = gm.subgraph_is_isomorphic()
sims = list(gm.subgraph_isomorphisms_iter())
llens = [len(_) for _ in sims]
pprint(sims)
print(llens)
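    # Keep only the subgraph isomorphisms that map every atom index onto
    # itself (identity mappings).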
ms = [i for i, d in enumerate(sims)
if all([i == j for i, j in d.items()])
]
mapping = sims[ms[0]]
pprint(mapping)
# import pdb; pdb.set_trace()
# for mapping in sims:
# import pdb; pdb.set_trace()
pass
if __name__ == "__main__":
run()
| gpl-3.0 |
DQE-Polytech-University/Beamplex | tests/helmholtz_test_output.py | 1 | 1334 | import unittest
import sys
from os import path
from matplotlib import pyplot as plt
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from src.helmholtz import *
class TestHelmholtzInput(unittest.TestCase):
def testHelmholtz(self):
try:
solver = HelmholtzSolver(999, 0.85, [1, 1.05, 0.007, 0.75, 1], [3.331564442988186, 3.393487851264978, 3.6, 3.393487851264978, 3.331564442988186])
solver.refractionMatrix()
solver.find_neffective()
solver.find_matrix()
solver.coeffs(int(999 * 0.9))
solver.find_Xforward()
solver.find_Xrev()
solver.Field()
solver.Norm()
except Exception, error:
self.fail(error)
error = []
f = open("test_field_output.txt", "r")
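        # Compare the squared field amplitude against the reference values
        # stored one per line in test_field_output.txt.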
for x in range(1000):
n = float(f.readline())
solver.UTOTAL[x] = solver.UTOTAL[x] ** 2
error.append(solver.UTOTAL[x] - n)
error[x] = abs(error[x])
plt.plot(solver.gridX, error)
plt.xlabel('position, micrometers')
plt.ylabel('error, arb. units')
plt.savefig('error.png', format='png', dpi=100)
plt.clf()
if __name__ == '__main__':
unittest.main()
| mit |
mlyundin/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
averagehat/scikit-bio | skbio/stats/ordination/_base.py | 4 | 16284 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip
from functools import partial
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from IPython.core.pylabtools import print_figure
from IPython.core.display import Image, SVG
from skbio._base import SkbioObject
from skbio.stats._misc import _pprint_strs
from skbio.util._decorator import experimental
# avoid flake8 unused import error
Axes3D
class OrdinationResults(SkbioObject):
"""Store ordination results, providing serialization and plotting support.
Stores various components of ordination results. Provides methods for
serializing/deserializing results, as well as generation of basic
matplotlib 3-D scatterplots. Will automatically display PNG/SVG
representations of itself within the IPython Notebook.
Attributes
----------
eigvals : 1-D numpy array
The result eigenvalues
species : 2-D numpy array
The result coordinates for each species
site : 2-D numpy array
The results coordinates for each site
biplot : 2-D numpy array
The result biplot coordinates
site_constraints : 2-D numpy array
The result coordinates for each site constraint
proportion_explained : 1-D numpy array
The proportion explained by each eigenvector
species_ids : list of str
The species identifiers
site_ids : list of str
The site identifiers
png
svg
"""
default_write_format = 'ordination'
@experimental(as_of="0.4.0")
def __init__(self, eigvals, species=None, site=None, biplot=None,
site_constraints=None, proportion_explained=None,
species_ids=None, site_ids=None):
self.eigvals = eigvals
self.species = species
self.site = site
self.biplot = biplot
self.site_constraints = site_constraints
self.proportion_explained = proportion_explained
self.species_ids = species_ids
self.site_ids = site_ids
@experimental(as_of="0.4.0")
def __str__(self):
"""Return a string representation of the ordination results.
String representation lists ordination results attributes and indicates
whether or not they are present. If an attribute is present, its
dimensions are listed. A truncated list of species and site IDs are
included (if they are present).
Returns
-------
str
String representation of the ordination results.
"""
lines = ['Ordination results:']
attrs = [(self.eigvals, 'Eigvals'),
(self.proportion_explained, 'Proportion explained'),
(self.species, 'Species'),
(self.site, 'Site'),
(self.biplot, 'Biplot'),
(self.site_constraints, 'Site constraints')]
for attr, attr_label in attrs:
def formatter(e):
return 'x'.join(['%d' % s for s in e.shape])
lines.append(self._format_attribute(attr, attr_label, formatter))
lines.append(self._format_attribute(self.species_ids, 'Species IDs',
lambda e: _pprint_strs(e)))
lines.append(self._format_attribute(self.site_ids, 'Site IDs',
lambda e: _pprint_strs(e)))
return '\n'.join(lines)
@experimental(as_of="0.4.0")
def plot(self, df=None, column=None, axes=(0, 1, 2), axis_labels=None,
title='', cmap=None, s=20):
"""Create a 3-D scatterplot of ordination results colored by metadata.
Creates a 3-D scatterplot of the ordination results, where each point
represents a site. Optionally, these points can be colored by metadata
(see `df` and `column` below).
Parameters
----------
df : pandas.DataFrame, optional
``DataFrame`` containing site metadata. Must be indexed by site ID,
and all site IDs in the ordination results must exist in the
``DataFrame``. If ``None``, sites (i.e., points) will not be
colored by metadata.
column : str, optional
Column name in `df` to color sites (i.e., points in the plot) by.
Cannot have missing data (i.e., ``np.nan``). `column` can be
numeric or categorical. If numeric, all values in the column will
be cast to ``float`` and mapped to colors using `cmap`. A colorbar
will be included to serve as a legend. If categorical (i.e., not
all values in `column` could be cast to ``float``), colors will be
chosen for each category using evenly-spaced points along `cmap`. A
legend will be included. If ``None``, sites (i.e., points) will not
be colored by metadata.
axes : iterable of int, optional
Indices of site coordinates to plot on the x-, y-, and z-axes. For
example, if plotting PCoA results, ``axes=(0, 1, 2)`` will plot
PC 1 on the x-axis, PC 2 on the y-axis, and PC 3 on the z-axis.
Must contain exactly three elements.
axis_labels : iterable of str, optional
Labels for the x-, y-, and z-axes. If ``None``, labels will be the
values of `axes` cast as strings.
title : str, optional
Plot title.
cmap : str or matplotlib.colors.Colormap, optional
Name or instance of matplotlib colormap to use for mapping `column`
values to colors. If ``None``, defaults to the colormap specified
in the matplotlib rc file. Qualitative colormaps (e.g., ``Set1``)
are recommended for categorical data, while sequential colormaps
(e.g., ``Greys``) are recommended for numeric data. See [1]_ for
these colormap classifications.
s : scalar or iterable of scalars, optional
Size of points. See matplotlib's ``Axes3D.scatter`` documentation
for more details.
Returns
-------
matplotlib.figure.Figure
Figure containing the scatterplot and legend/colorbar if metadata
were provided.
Raises
------
ValueError
Raised on invalid input, including the following situations:
- there are not at least three dimensions to plot
- there are not exactly three values in `axes`, they are not
unique, or are out of range
- there are not exactly three values in `axis_labels`
- either `df` or `column` is provided without the other
- `column` is not in the ``DataFrame``
- site IDs in the ordination results are not in `df` or have
missing data in `column`
See Also
--------
mpl_toolkits.mplot3d.Axes3D.scatter
Notes
-----
This method creates basic plots of ordination results, and is intended
to provide a quick look at the results in the context of metadata
(e.g., from within the IPython Notebook). For more customization and to
generate publication-quality figures, we recommend EMPeror [2]_.
References
----------
.. [1] http://matplotlib.org/examples/color/colormaps_reference.html
.. [2] EMPeror: a tool for visualizing high-throughput microbial
community data. Vazquez-Baeza Y, Pirrung M, Gonzalez A, Knight R.
Gigascience. 2013 Nov 26;2(1):16. http://biocore.github.io/emperor/
Examples
--------
.. plot::
Define a distance matrix with four sites labelled A-D:
>>> from skbio import DistanceMatrix
>>> dm = DistanceMatrix([[0., 0.21712454, 0.5007512, 0.91769271],
... [0.21712454, 0., 0.45995501, 0.80332382],
... [0.5007512, 0.45995501, 0., 0.65463348],
... [0.91769271, 0.80332382, 0.65463348, 0.]],
... ['A', 'B', 'C', 'D'])
Define metadata for each site in a ``pandas.DataFrame``:
>>> import pandas as pd
>>> metadata = {
... 'A': {'body_site': 'skin'},
... 'B': {'body_site': 'gut'},
... 'C': {'body_site': 'gut'},
... 'D': {'body_site': 'skin'}}
>>> df = pd.DataFrame.from_dict(metadata, orient='index')
Run principal coordinate analysis (PCoA) on the distance matrix:
>>> from skbio.stats.ordination import PCoA
>>> pcoa_results = PCoA(dm).scores()
Plot the ordination results, where each site is colored by body site
(a categorical variable):
>>> fig = pcoa_results.plot(df=df, column='body_site',
... title='Sites colored by body site',
... cmap='Set1', s=50)
"""
# Note: New features should not be added to this method and should
# instead be added to EMPeror (http://biocore.github.io/emperor/).
# Only bug fixes and minor updates should be made to this method.
coord_matrix = self.site.T
self._validate_plot_axes(coord_matrix, axes)
# derived from
# http://matplotlib.org/examples/mplot3d/scatter3d_demo.html
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = coord_matrix[axes[0]]
ys = coord_matrix[axes[1]]
zs = coord_matrix[axes[2]]
point_colors, category_to_color = self._get_plot_point_colors(
df, column, self.site_ids, cmap)
scatter_fn = partial(ax.scatter, xs, ys, zs, s=s)
if point_colors is None:
plot = scatter_fn()
else:
plot = scatter_fn(c=point_colors, cmap=cmap)
if axis_labels is None:
axis_labels = ['%d' % axis for axis in axes]
elif len(axis_labels) != 3:
raise ValueError("axis_labels must contain exactly three elements "
"(found %d elements)." % len(axis_labels))
ax.set_xlabel(axis_labels[0])
ax.set_ylabel(axis_labels[1])
ax.set_zlabel(axis_labels[2])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.set_title(title)
# create legend/colorbar
if point_colors is not None:
if category_to_color is None:
fig.colorbar(plot)
else:
self._plot_categorical_legend(ax, category_to_color)
return fig
def _validate_plot_axes(self, coord_matrix, axes):
"""Validate `axes` against coordinates matrix."""
num_dims = coord_matrix.shape[0]
if num_dims < 3:
raise ValueError("At least three dimensions are required to plot "
"ordination results. There are only %d "
"dimension(s)." % num_dims)
if len(axes) != 3:
raise ValueError("axes must contain exactly three elements (found "
"%d elements)." % len(axes))
if len(set(axes)) != 3:
raise ValueError("The values provided for axes must be unique.")
for idx, axis in enumerate(axes):
if axis < 0 or axis >= num_dims:
raise ValueError("axes[%d] must be >= 0 and < %d." %
(idx, num_dims))
def _get_plot_point_colors(self, df, column, ids, cmap):
"""Return a list of colors for each plot point given a metadata column.
If `column` is categorical, additionally returns a dictionary mapping
each category (str) to color (used for legend creation).
"""
if ((df is None and column is not None) or (df is not None and
column is None)):
raise ValueError("Both df and column must be provided, or both "
"must be None.")
elif df is None and column is None:
point_colors, category_to_color = None, None
else:
if column not in df:
raise ValueError("Column '%s' not in data frame." % column)
col_vals = df.loc[ids, column]
if col_vals.isnull().any():
raise ValueError("One or more IDs in the ordination results "
"are not in the data frame, or there is "
"missing data in the data frame's '%s' "
"column." % column)
category_to_color = None
try:
point_colors = col_vals.astype(float)
except ValueError:
# we have categorical data, so choose a color for each
# category, where colors are evenly spaced across the
# colormap.
# derived from http://stackoverflow.com/a/14887119
categories = col_vals.unique()
cmap = plt.get_cmap(cmap)
category_colors = cmap(np.linspace(0, 1, len(categories)))
category_to_color = dict(zip(categories, category_colors))
point_colors = col_vals.apply(lambda x: category_to_color[x])
point_colors = point_colors.tolist()
return point_colors, category_to_color
def _plot_categorical_legend(self, ax, color_dict):
"""Add legend to plot using specified mapping of category to color."""
# derived from http://stackoverflow.com/a/20505720
proxies = []
labels = []
for category in color_dict:
proxy = mpl.lines.Line2D([0], [0], linestyle='none',
c=color_dict[category], marker='o')
proxies.append(proxy)
labels.append(category)
# place legend outside of the axes (centered)
# derived from http://matplotlib.org/users/legend_guide.html
ax.legend(proxies, labels, numpoints=1, loc=6,
bbox_to_anchor=(1.05, 0.5), borderaxespad=0.)
# Here we define the special repr methods that provide the IPython display
# protocol. Code derived from:
# https://github.com/ipython/ipython/blob/2.x/examples/Notebook/
# Custom%20Display%20Logic.ipynb
# See licenses/ipython.txt for more details.
def _repr_png_(self):
return self._figure_data('png')
def _repr_svg_(self):
return self._figure_data('svg')
# We expose the above reprs as properties, so that the user can see them
# directly (since otherwise the client dictates which one it shows by
# default)
@property
@experimental(as_of="0.4.0")
def png(self):
"""Display basic 3-D scatterplot in IPython Notebook as PNG."""
return Image(self._repr_png_(), embed=True)
@property
@experimental(as_of="0.4.0")
def svg(self):
"""Display basic 3-D scatterplot in IPython Notebook as SVG."""
return SVG(self._repr_svg_())
def _figure_data(self, format):
fig = self.plot()
data = print_figure(fig, format)
# We MUST close the figure, otherwise IPython's display machinery
# will pick it up and send it as output, resulting in a double display
plt.close(fig)
return data
def _format_attribute(self, attr, attr_label, formatter):
if attr is None:
formatted_attr = 'N/A'
else:
formatted_attr = formatter(attr)
return '\t%s: %s' % (attr_label, formatted_attr)
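# Illustrative sketch (not part of the class API): how categorical metadata
# values are mapped to evenly spaced colors along a colormap, mirroring the
# fallback branch of ``_get_plot_point_colors`` above. The category names are
# made up for demonstration.
def _category_color_sketch(categories=('gut', 'skin', 'oral'), cmap_name='Set1'):
    cmap = plt.get_cmap(cmap_name)
    category_colors = cmap(np.linspace(0, 1, len(categories)))
    # each category gets one RGBA row from the colormap
    return dict(zip(categories, category_colors))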
class Ordination(object):
short_method_name = 'Overwrite in subclass!'
long_method_name = 'Overwrite in subclass!'
| bsd-3-clause |
great-expectations/great_expectations | great_expectations/expectations/core/expect_column_distinct_values_to_contain_set.py | 1 | 5877 | from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import ColumnExpectation, InvalidExpectationConfigurationError
from ..metrics.util import parse_value_set
class ExpectColumnDistinctValuesToContainSet(ColumnExpectation):
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values
metric_dependencies = ("column.value_counts",)
success_keys = (
"value_set",
"parse_strings_as_datetimes",
)
# Default values
default_kwarg_values = {
"value_set": None,
"parse_strings_as_datetimes": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""Validate that the user has provided a value_set and that the configuration has been initialized."""
super().validate_configuration(configuration)
try:
assert "value_set" in configuration.kwargs, "value_set is required"
assert isinstance(
configuration.kwargs["value_set"], (list, set, dict)
), "value_set must be a list, set, or dict"
if isinstance(configuration.kwargs["value_set"], dict):
assert (
"$PARAMETER" in configuration.kwargs["value_set"]
), 'Evaluation Parameter dict for value_set kwarg must have "$PARAMETER" key'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"value_set",
"parse_strings_as_datetimes",
"row_condition",
"condition_parser",
],
)
if params["value_set"] is None or len(params["value_set"]) == 0:
values_string = "[ ]"
else:
for i, v in enumerate(params["value_set"]):
params["v__" + str(i)] = v
values_string = " ".join(
["$v__" + str(i) for i, v in enumerate(params["value_set"])]
)
template_str = "distinct values must contain this set: " + values_string + "."
if params.get("parse_strings_as_datetimes"):
template_str += " Values should be parsed as datetimes."
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
parse_strings_as_datetimes = self.get_success_kwargs(configuration).get(
"parse_strings_as_datetimes"
)
observed_value_counts = metrics.get("column.value_counts")
value_set = self.get_success_kwargs(configuration).get("value_set")
if parse_strings_as_datetimes:
parsed_value_set = parse_value_set(value_set)
observed_value_counts.index = pd.to_datetime(observed_value_counts.index)
else:
parsed_value_set = value_set
observed_value_set = set(observed_value_counts.index)
expected_value_set = set(parsed_value_set)
return {
"success": observed_value_set.issuperset(expected_value_set),
"result": {
"observed_value": sorted(list(observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
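# Hedged usage sketch (not exercised by this module): one way to run this
# expectation against a pandas DataFrame through the classic ``from_pandas``
# helper, which is assumed to be available here. The column name and values
# are illustrative.
def _usage_sketch():
    import great_expectations as ge

    frame = pd.DataFrame({"fruit": ["apple", "banana", "apple"]})
    dataset = ge.from_pandas(frame)
    # returns a validation result whose ``success`` field reflects whether the
    # distinct values of "fruit" contain the requested set
    return dataset.expect_column_distinct_values_to_contain_set(
        "fruit", value_set=["apple", "banana"]
    )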
| apache-2.0 |
vybstat/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
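# Minimal hedged usage sketch of the estimator under test; the toy points
# below are made up and independent of the module-level fixtures above.
def _nearest_centroid_usage_sketch():
    X_demo = [[0, 0], [1, 1], [9, 9], [10, 10]]
    y_demo = [0, 0, 1, 1]
    clf = NearestCentroid()
    clf.fit(X_demo, y_demo)
    # each prediction is the label of the closest class centroid
    return clf.predict([[0.5, 0.5], [9.5, 9.5]])  # expected: array([0, 1])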
| bsd-3-clause |
tjlane/electrolysis | electrolysis/align.py | 1 | 7236 |
"""
align.py
"""
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
_WARN = False
def blob_align(images, init_peak_loc=None):
"""
Align a series of images on the strongest peak.
Pad your image beforehand if you want to retain information at the
very edges of the image.
"""
# initial condition for peak placement
if init_peak_loc is None:
anchor_pos = np.array([images.shape[1], images.shape[2]]) / 2.0
else:
anchor_pos = init_peak_loc
for i in range(images.shape[0]):
# find the beam in the image by blurring w/Gaussian and peakfinding
# parameters below were chosen to work well with Oct run on UED
gi = ndimage.filters.gaussian_filter(images[i], 2.0)
pos, shp = find_blobs(gi, discard_border=10,
discard_small_blobs=20)
if len(pos) == 0:
raise RuntimeError('No peaks found in image %d' % i)
# choose the closest peak to the previous one
pos = np.array(pos)
diffs = pos - anchor_pos[None,:]
new_pos = pos[ np.argmin(np.square(diffs).sum(1)) ] # L2 norm
# shift image
if i == 0:
anchor_pos = new_pos
else:
shift = anchor_pos - new_pos
print 'Shifting image %d by:' % i, shift
images[i] = ndimage.interpolation.shift(images[i], shift)
return images
def find_blobs(image, sigma_threshold=5.0, discard_border=1,
discard_small_blobs=0):
"""
Find peaks, or `blobs`, in a 2D image.
This algorithm works based on a simple threshold. It finds continuous
regions of intensity that are greater than `sigma_threshold` standard
deviations over the mean, and returns each of those regions as a single
blob.
Parameters
----------
image : np.ndarray, two-dimensional
An image to peakfind on.
Returns
-------
centers : list of tuples of floats
A list of the (x,y)/(col,row) positions of each peak, in pixels.
widths : list of tuples of floats
A list of the (x,y)/(col,row) size of each peak, in pixels.
Optional Parameters
-------------------
sigma_threshold : float
How many standard deviations above the mean to set the binary threshold.
discard_border : int
The size of a border region to ignore. In many images, the borders are
noisy or systematically erroneous.
discard_small_blobs : int
Discard few-pixel blobs, which are the most common false positives
for the blob finder. The argument specifies the minimal area
(in pixels) a blob must encompass to be counted. Default: no
rejections (0 pixels).
Notes
-----
Tests indicate this algorithm takes ~200 ms to process a single image, so
can run at ~5 Hz on a single processor.
"""
if not len(image.shape) == 2:
raise ValueError('Can only process 2-dimensional images')
# discard the borders, which can be noisy...
image[ :discard_border,:] = 0
image[-discard_border:,:] = 0
image[:, :discard_border] = 0
image[:,-discard_border:] = 0
# find the center of blobs above `sigma_threshold` STDs
binary = (image > (image.mean() + image.std() * sigma_threshold))
labeled, num_labels = ndimage.label(binary)
centers = ndimage.measurements.center_of_mass(binary,
labeled,
range(1,num_labels+1))
# for each peak, find its x- & y-width
# we do this by measuring how many pixels are above 5-sigma in both the
# x and y direction at the center of each blob
widths = []
warning_printed = False
for i in range(num_labels)[::-1]: # backwards so pop works below
c = centers[i]
r_slice = labeled[int(c[0]),:]
zy = np.where( np.abs(r_slice - np.roll(r_slice, 1)) == i+1 )[0]
c_slice = labeled[:,int(c[1])]
zx = np.where( np.abs(c_slice - np.roll(c_slice, 1)) == i+1 )[0]
if not (len(zx) == 2) or not (len(zy) == 2):
if (not warning_printed) and _WARN:
print "WARNING: Peak algorithm confused about width of peak at", c
print " Setting default peak width (5,5). This warning"
print " will only be printed ONCE. Proceed w/caution!"
warning_printed = True
widths.append( (5.0, 5.0) )
else:
x_width = zx[1] - zx[0]
y_width = zy[1] - zy[0]
# if the blob is a "singleton" and we want to get rid
# of it, we do so, otherwise we add the widths
if (x_width * y_width) < discard_small_blobs:
#print "Discarding small blob %d, area %d" % (i, (x_width * y_width))
centers.pop(i)
else:
widths.append( (x_width, y_width) )
assert len(centers) == len(widths), 'centers and widths not same len'
return centers, widths
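# Hedged, self-contained sketch of the core find_blobs() idea on a synthetic
# image: binarize at mean + k*std, label the connected regions, then take each
# region's center of mass. The image and threshold below are illustrative.
def _find_blobs_sketch():
    img = np.zeros((64, 64))
    img[20:24, 30:34] = 100.0  # one bright synthetic blob
    binary = img > (img.mean() + 5.0 * img.std())
    labeled, num_labels = ndimage.label(binary)
    centers = ndimage.measurements.center_of_mass(binary, labeled,
                                                  range(1, num_labels + 1))
    return centers  # approximately [(21.5, 31.5)]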
def draw_blobs(image, centers, widths):
"""
Draw blobs (peaks) on an image.
Parameters
----------
image : np.ndarray, two-dimensional
An image to render.
centers : list of tuples of floats
A list of the (x,y) positions of each peak, in pixels.
widths : list of tuples of floats
A list of the (x,y) size of each peak, in pixels.
"""
plt.figure()
plt.imshow(image.T, interpolation='nearest')
centers = np.array(centers)
widths = np.array(widths)
# flip the y-sign to for conv. below
diagonal_widths = widths.copy()
diagonal_widths[:,1] *= -1
for i in range(len(centers)):
# draw a rectangle around the center
pts = np.array([
centers[i] - widths[i] / 2.0, # bottom left
centers[i] - diagonal_widths[i] / 2.0, # top left
centers[i] + widths[i] / 2.0, # top right
centers[i] + diagonal_widths[i] / 2.0, # bottom right
centers[i] - widths[i] / 2.0 # bottom left
])
plt.plot(pts[:,0], pts[:,1], color='orange', lw=3)
plt.xlim([0, image.shape[0]])
plt.ylim([0, image.shape[1]])
plt.show()
return
def _test_alignment():
#from skimage import data
#i0 = data.camera()
i0 = np.zeros((100, 100))
i0[50:55,50:55] = 1000.0
s1 = np.random.randn(2)*10.0
i1 = ndimage.interpolation.shift(i0, s1)
print s1
i2 = ndimage.interpolation.shift(i0, np.random.randn(2)*10.0)
images = np.array([i0, i1, i2])
unaligned = np.copy(images)
blob_align(images)
from matplotlib import pyplot as plt
plt.figure()
ax = plt.subplot(121)
ax.imshow(unaligned.sum(0))
ax = plt.subplot(122)
ax.imshow(images.sum(0))
plt.show()
if __name__ == "__main__":
_test_alignment()
| mit |
dirkcgrunwald/RTLSDR-Scanner | src/printer.py | 3 | 2990 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from PIL import Image
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg
import wx
class PrintOut(wx.Printout):
def __init__(self, graph, filename, pageConfig):
wx.Printout.__init__(self, title=filename)
self.figure = graph.get_figure()
margins = (pageConfig.GetMarginTopLeft().Get()[0],
pageConfig.GetMarginTopLeft().Get()[1],
pageConfig.GetMarginBottomRight().Get()[0],
pageConfig.GetMarginBottomRight().Get()[1])
self.margins = [v / 25.4 for v in margins]
def __draw_image(self, sizeInches, ppi):
oldSize = self.figure.get_size_inches()
oldDpi = self.figure.get_dpi()
self.figure.set_size_inches(sizeInches)
self.figure.set_dpi(ppi)
canvas = FigureCanvasAgg(self.figure)
canvas.draw()
renderer = canvas.get_renderer()
if matplotlib.__version__ >= '1.2':
buf = renderer.buffer_rgba()
else:
buf = renderer.buffer_rgba(0, 0)
size = canvas.get_width_height()
image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
self.figure.set_size_inches(oldSize)
self.figure.set_dpi(oldDpi)
imageWx = wx.EmptyImage(image.size[0], image.size[1])
imageWx.SetData(image.convert('RGB').tostring())
return imageWx
def GetPageInfo(self):
return 1, 1, 1, 1
def HasPage(self, page):
return page == 1
def OnPrintPage(self, _page):
dc = self.GetDC()
if self.IsPreview():
ppi = max(self.GetPPIScreen())
sizePixels = dc.GetSize()
else:
ppi = max(self.GetPPIPrinter())
sizePixels = self.GetPageSizePixels()
width = (sizePixels[0] / ppi) - self.margins[1] - self.margins[3]
height = (sizePixels[1] / ppi) - self.margins[0] - self.margins[2]
sizeInches = (width, height)
image = self.__draw_image(sizeInches, ppi)
dc.DrawBitmap(image.ConvertToBitmap(),
self.margins[0] * ppi,
self.margins[1] * ppi)
return True
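# Hedged sketch (not used by PrintOut): render a figure off-screen with the
# Agg canvas and convert the RGBA buffer to a PIL image, mirroring
# __draw_image above. Assumes a matplotlib recent enough to expose
# canvas.buffer_rgba(); older releases go through renderer.buffer_rgba() as
# the version check in __draw_image does.
def _figure_to_pil_sketch(figure):
    canvas = FigureCanvasAgg(figure)
    canvas.draw()
    buf = canvas.buffer_rgba()
    size = canvas.get_width_height()
    return Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)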
| gpl-3.0 |
AtsushiHashimoto/fujino_mthesis | tools/flowgraph/obtain_recipes_from_ingredients.py | 1 | 2937 | # _*_ coding: utf-8 -*-
import os
import json
import argparse
import numpy as np
import pandas as pd
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from module import ontology
from module.CookpadRecipe import CookpadRecipe
def parse():
parser = argparse.ArgumentParser()
parser.add_argument('-ingredients', help=u'comma-separated list of ingredients',
default="")
parser.add_argument('-synonym_path', help=u'ontology/synonym.tsv',
default="/home/fujino/work/data/ontology/synonym.tsv")
parser.add_argument('-rcp_loc_steps_path', help=u'recipe_id:dictionary("dir":directory, "steps":list of step_no)',
default="/home/fujino/work/output/recipe_image_directory.json")
parser.add_argument('-output_dir', help=u'output directory',
default="/home/fujino/work/output/cookpaddata")
parser.add_argument('-seed', help=u'numpy random seed', type=int,
default=0)
parser.add_argument('-usr', help=u'mysql usr',
default="fujino")
parser.add_argument('-password', help=u'mysql password',
default="")
parser.add_argument('-socket', help=u'mysql socket',
default="/var/run/mysqld/mysqld.sock")
parser.add_argument('-host', help=u'mysql host',
default="localhost")
parser.add_argument('-db', help=u'mysql db',
default="cookpad_data")
params = parser.parse_args()
return vars(params)
def main(ingredients, synonym_path, rcp_loc_steps_path, output_dir, seed,
usr, password, socket, host, db):
synonym = ontology.load_food_synonym(synonym_path, key="concept")
with open(rcp_loc_steps_path, 'r') as fin:
rcp_loc_steps = json.load(fin)
cookpad_recipe = CookpadRecipe(usr, password, socket, host, db)
ingredients = [unicode(ingredient, encoding="utf-8") for ingredient in ingredients.split(",")]
np.random.seed(seed)
def prn(x):
print x.encode('utf-8')
for ingredient in ingredients:
print ingredient.encode('utf-8')
#map(prn,synonym.keys())
swings = synonym[ingredient]
print " ".join(swings).encode('utf-8')
recipes = cookpad_recipe.get_recipe_ids_from_ingredients(swings)
# save recipes which contain more than one images
recipes = [r for r in recipes if r in rcp_loc_steps]
if len(recipes) > 0:
print "save..."
df = pd.DataFrame(recipes)
df = df.iloc[np.random.permutation(len(df))]
df.to_csv(os.path.join(output_dir, 'recipes_%s.tsv' % ingredient),
sep="\t", encoding="utf-8", index=False, header=None)
else:
print "There are no recipes which contains", swings
if __name__ == '__main__':
params = parse()
main(**params)
| bsd-2-clause |
mrmeswani/Robotics | RoboND-Rover-Project/src/drive_rover.py | 1 | 9178 | # Do the necessary imports
import argparse
import shutil
import base64
from datetime import datetime
import os
import cv2
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO, StringIO
import json
import pickle
import matplotlib.image as mpimg
import time
# Import functions for perception and decision making
from perception import perception_step
from decision import decision_step
from supporting_functions import update_rover, create_output_images
# Initialize socketio server and Flask application
# (learn more at: https://python-socketio.readthedocs.io/en/latest/)
sio = socketio.Server()
app = Flask(__name__)
# Read in ground truth map and create 3-channel green version for overplotting
# NOTE: images are read in by default with the origin (0, 0) in the upper left
# and y-axis increasing downward.
ground_truth = mpimg.imread('../calibration_images/map_bw.png')
# This next line creates arrays of zeros in the red and blue channels
# and puts the map into the green channel. This is why the underlying
# map output looks green in the display image
ground_truth_3d = np.dstack((ground_truth*0, ground_truth*255, ground_truth*0)).astype(np.float)
# Define RoverState() class to retain rover state parameters
class RoverState():
def __init__(self):
self.start_time = None # To record the start time of navigation
self.total_time = None # To record total duration of navigation
self.img = None # Current camera image
self.pos = None # Current position (x, y)
self.yaw = None # Current yaw angle
self.pitch = None # Current pitch angle
self.roll = None # Current roll angle
self.vel = None # Current velocity
self.steer = 0 # Current steering angle
self.throttle = 0 # Current throttle value
self.brake = 0 # Current brake value
self.nav_angles = None # Angles of navigable terrain pixels
self.nav_dists = None # Distances of navigable terrain pixels
self.obs_angles = None # Angles of obstacle pixels
self.obs_dists = None # Distances of obstacle pixels
self.rock_angles = None # Angles of rock pixels
self.rock_dists = None # Distances of rock pixels
self.ground_truth = ground_truth_3d # Ground truth worldmap
self.mode = 'forward' # Current mode (can be forward or stop)
self.throttle_set = 0.2 # Throttle setting when accelerating
self.brake_set = 10 # Brake setting when braking
# The stop_forward and go_forward fields below represent total count
# of navigable terrain pixels. This is a very crude form of knowing
# when you can keep going and when you should stop. Feel free to
# get creative in adding new fields or modifying these!
self.stop_forward = 100 # Threshold to initiate stopping
self.go_forward = 500 # Threshold to go forward again
self.max_vel = 2 # Maximum velocity (meters/second)
self.halted = 0 # keeps track of how many consecutive image samples we have been at zero speed
# Image output from perception step
# Update this image to display your intermediate analysis steps
# on screen in autonomous mode
self.vision_image = np.zeros((160, 320, 3), dtype=np.float)
# Worldmap
# Update this image with the positions of navigable terrain
# obstacles and rock samples
self.worldmap = np.zeros((200, 200, 3), dtype=np.float)
self.samples_pos = None # To store the actual sample positions
self.samples_to_find = 0 # To store the initial count of samples
self.samples_located = 0 # To store number of samples located on map
self.samples_collected = 0 # To count the number of samples collected
self.near_sample = 0 # Will be set to telemetry value data["near_sample"]
self.picking_up = 0 # Will be set to telemetry value data["picking_up"]
self.send_pickup = False # Set to True to trigger rock pickup
self.Debug = 0 # Debug mode, defaults to false
self.perc_mapped = 0
self.fidelity = 0
self.prev_pos = 0 # prev pos
self.prev_yaw = 0 # prev yaw
self.clockwisecount = 0
self.anticlockwisecount = 0
self.prev_perc_mapped = 0
self.nomap_count = 0 # How many samples we are not mapping
# Initialize our rover
Rover = RoverState()
# Variables to track frames per second (FPS)
# Intitialize frame counter
frame_counter = 0
# Initalize second counter
second_counter = time.time()
fps = None
# Define telemetry function for what to do with incoming data
@sio.on('telemetry')
def telemetry(sid, data):
global frame_counter, second_counter, fps
frame_counter+=1
# Do a rough calculation of frames per second (FPS)
if (time.time() - second_counter) > 1:
fps = frame_counter
frame_counter = 0
second_counter = time.time()
print("Current FPS: {}".format(fps))
if data:
global Rover
Rover.prev_pos = Rover.pos # save prev pos before getting current data
Rover.prev_yaw = Rover.yaw
Rover.prev_perc_mapped = Rover.perc_mapped
# Initialize / update Rover with current telemetry
Rover, image = update_rover(Rover, data)
if np.isfinite(Rover.vel):
# Execute the perception and decision steps to update the Rover's state
Rover = perception_step(Rover)
Rover = decision_step(Rover)
# Create output images to send to server
out_image_string1, out_image_string2 = create_output_images(Rover)
# The action step! Send commands to the rover!
# Don't send both of these, they both trigger the simulator
# to send back new telemetry so we must only send one
# back in response to the current telemetry data.
# If in a state where want to pickup a rock send pickup command
if Rover.send_pickup and not Rover.picking_up:
send_pickup()
# Reset Rover flags
Rover.send_pickup = False
else:
# Send commands to the rover!
commands = (Rover.throttle, Rover.brake, Rover.steer)
send_control(commands, out_image_string1, out_image_string2)
# In case of invalid telemetry, send null commands
else:
# Send zeros for throttle, brake and steer and empty images
send_control((0, 0, 0), '', '')
# If you want to save camera images from autonomous driving specify a path
# Example: $ python drive_rover.py image_folder_path
# Conditional to save image frame if folder was specified
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control((0, 0, 0), '', '')
sample_data = {}
sio.emit(
"get_samples",
sample_data,
skip_sid=True)
def send_control(commands, image_string1, image_string2):
# Define commands to be sent to the rover
data={
'throttle': commands[0].__str__(),
'brake': commands[1].__str__(),
'steering_angle': commands[2].__str__(),
'inset_image1': image_string1,
'inset_image2': image_string2,
}
# Send commands via socketIO server
sio.emit(
"data",
data,
skip_sid=True)
eventlet.sleep(0)
# Define a function to send the "pickup" command
def send_pickup():
print("Picking up")
pickup = {}
sio.emit(
"pickup",
pickup,
skip_sid=True)
eventlet.sleep(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
parser.add_argument(
'-debug',
action='store_true')
args = parser.parse_args()
#os.system('rm -rf IMG_stream/*')
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("Recording this run ...")
else:
print("NOT recording this run ...")
if args.debug :
print("Debug mode enabled")
Rover.Debug = 1
else:
print("Debug mode is OFF")
Rover.Debug = 0
# wrap Flask application with socketio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| gpl-3.0 |
ran5515/DeepDecision | tensorflow/python/estimator/canned/dnn_test.py | 20 | 16058 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
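# Hedged end-to-end sketch of the canned estimator these tests exercise,
# written against the public TF 1.x ``tf.estimator`` namespace instead of the
# internal modules imported above. The feature name 'x', shapes, and random
# data are illustrative assumptions and this helper is not run by the tests.
def _public_api_sketch():
    import numpy as np
    import tensorflow as tf

    feature_columns = [tf.feature_column.numeric_column('x', shape=(2,))]
    est = tf.estimator.DNNClassifier(
        hidden_units=[16, 16], feature_columns=feature_columns, n_classes=3)
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': np.random.rand(100, 2).astype(np.float32)},
        y=np.random.randint(0, 3, size=(100, 1)),
        batch_size=10, num_epochs=None, shuffle=True)
    est.train(train_input_fn, steps=10)
    return est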
if __name__ == '__main__':
test.main()
| apache-2.0 |
zorojean/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with at least
    # `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
    # wrap the loader in a memoizing function that will return memmapped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
    # load and memoize the faces as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
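# Illustrative usage sketch (not part of the original module): how a caller
# might load the people dataset and inspect its shapes. The parameter values
# below are arbitrary choices for demonstration, and running this helper
# requires network access unless the data is already cached locally.
def _example_fetch_lfw_people():
    """Hypothetical demo of fetch_lfw_people; not executed at import time."""
    lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    n_samples, h, w = lfw_people.images.shape
    print("n_samples=%d, image size=%dx%d, n_classes=%d"
          % (n_samples, h, w, len(lfw_people.target_names)))
    return lfw_people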
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
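# Illustrative sketch (not part of the original module) of the two index-file
# line formats handled above; the person names and picture indices are made up
# for demonstration only.
def _example_pair_line_formats():
    """Hypothetical demo: 3 tab-separated fields describe two pictures of the
    same person (target 1), 4 fields describe two different people (target 0).
    """
    same_person = b"Aaron_Peirsol\t1\t2".strip().split(b"\t")
    different_people = b"Aaron_Peirsol\t1\tZico\t1".strip().split(b"\t")
    return len(same_person) == 3, len(different_people) == 4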
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
    In the official `README.txt`_ this task is described as the
    "Restricted" task. As I am not sure how to implement the
    "Unrestricted" variant correctly, I have left it unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
    # wrap the loader in a memoizing function that will return memmapped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
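# Illustrative usage sketch (not part of the original module): loading the
# development training pairs. The shapes quoted in the comments mirror the
# docstring above rather than output from a live run.
def _example_fetch_lfw_pairs():
    """Hypothetical demo of fetch_lfw_pairs on the 'train' subset."""
    lfw_pairs = fetch_lfw_pairs(subset='train')
    # lfw_pairs.pairs.shape  -> (2200, 2, 62, 47) with the default slice/resize
    # lfw_pairs.target.shape -> (2200,)
    return lfw_pairs.pairs.shape, lfw_pairs.target.shape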
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
        and value is an `inflow.Series`, a zero-input `inflow.Transform`, or
        None (which deletes the column).
    Raises:
      TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, zero-input
        `inflow.Transform`, or None.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
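# Illustrative sketch (not part of the original module): the dict-like API of
# DataFrame. `series` is assumed to be an object implementing the `Series`
# interface (for example the output of a zero-input `Transform`); it is a
# placeholder argument, not a concrete object constructed here.
def _example_dataframe_usage(series):
  """Hypothetical demo of assign, select_columns and exclude_columns."""
  df = DataFrame()
  df["feature"] = series                     # routed through assign()
  only = df.select_columns(["feature"])      # new DataFrame with one column
  without = df.exclude_columns(["feature"])  # new DataFrame without it
  del df["feature"]                          # __delitem__ assigns None
  return only, without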
| apache-2.0 |
ElinorSun/Soledad | build/evaluate/prepare3dedges.py | 2 | 5210 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 9 08:33:31 2016
@author: fschenk
"""
import cv2
import cv
import numpy as np
import matplotlib.pyplot as plt
#test = []
#
#res = cv.Load('/datawl/CSI/CSI_SVN/Kiras CSISmartCam3D/CSISmartScan3D_Daten/Software/InfiniTAM-build/my.xml')
#pcl = cv.Load('/datawl/CSI/CSI_SVN/Kiras CSISmartCam3D/CSISmartScan3D_Daten/Software/InfiniTAM-build/pcl.xml')
##res = cv2.imread('/datawl/CSI/CSI_SVN/Kiras CSISmartCam3D/CSISmartScan3D_Daten/Daten_Testszenen/TUM/raycast_image.exr');
#fx = 525.0
#fy = 525.0
#cx = 319.5
#cy = 239.5
#K = np.zeros([3,3])
#K[0,0] = fx; K[1,1] = fy;
#K[0,2] = cx; K[1,2] = cy;
#K[2,2] = 1;
#res = pcl
#d = np.zeros([480,640])
#for xx in range(640):
# for yy in range(480):
# _2d = K.dot(res[yy,xx][0:3])
# x = _2d[0]/_2d[2]
# y = _2d[1]/_2d[2]
# if (x >= 0 and x < 640 and y >= 0 and y < 480):
# d[y,x] = res[yy,xx][2];
#d2 = d*1.0/0.005
#plt.imshow(d2)
#stop
def unprojectPtsWithDepth(edges,depth):
nPoints = 0
_3dedges = []
#points = []
for xx in range(edges.shape[1]):
for yy in range(edges.shape[0]):
if (edges[yy,xx] > 0 and depth[yy,xx] > 0.1):
nPoints += 1
Z = depth[yy,xx]
X = Z * (xx-cx) / fx
Y = Z * (yy-cy) / fy
_3dedges.append([X,Y,Z])
#print X,Y,Z
#color = rgb[yy,xx];
#points.append("%f %f %f %d %d %d 0\n"%(X,Y,Z,color[0],color[1],color[2]))
#print nPoints
#file = open("test_ply.ply","w")
#file.write('''ply
#format ascii 1.0
#element vertex %d
#property float x
#property float y
#property float z
#property uchar red
#property uchar green
#property uchar blue
#property uchar alpha
#end_header
#%s
#'''%(len(points),"".join(points)))
#file.close()
return _3dedges
def transform3DPcl(_3dedges,R,t,K):
_3dedges = np.matrix(_3dedges).transpose()
_3dedges_transf = R.T*(_3dedges-t.reshape([3,1]))
#_3dedges_transf = (R*_3dedges+t.reshape([3,1]))
_3dedges_transf /= _3dedges_transf[2,:]
return K*_3dedges_transf
def generateEdgeImg(_2dreproj,shape):
newEdges = np.zeros(shape,dtype='uint8')
for i in range(_2dreproj.shape[1]):
        x = int(np.floor(_2dreproj[0, i]))
        y = int(np.floor(_2dreproj[1, i]))
#print _3dedges_transf[0,i],_3dedges_transf[1,i],x,y
if (x >= 0 and y >= 0 and x < newEdges.shape[1] and y < newEdges.shape[0]):
newEdges[y,x] = 255
return newEdges
rgb = cv2.imread('/datawl/CSI/CSI_SVN/Kiras CSISmartCam3D/CSISmartScan3D_Daten/Daten_Testszenen/TUM/freiburg1/rgbd_dataset_freiburg1_desk/rgb/1305031453.359684.png');
depth = cv2.imread('/datawl/CSI/CSI_SVN/Kiras CSISmartCam3D/CSISmartScan3D_Daten/Daten_Testszenen/TUM/freiburg1/rgbd_dataset_freiburg1_desk/depth/1305031453.374112.png',cv2.IMREAD_UNCHANGED);
depth = depth/5000.0;
gray = cv2.cvtColor(rgb,cv2.COLOR_BGR2GRAY)
edges = np.array([])
edges = cv2.Canny(gray,150,100,edges,3,True)
fx = 525.0
fy = 525.0
cx = 319.5
cy = 239.5
K = np.zeros([3,3])
K[0,0] = fx; K[1,1] = fy;
K[0,2] = cx; K[1,2] = cy;
K[2,2] = 1;
_3dedges = unprojectPtsWithDepth(edges,depth)
R = np.matrix([[0.999693,-0.0220359,0.0113385],[0.022001,0.999753,0.00319164],[-0.0114061,-0.0029412,0.99993]])
t=np.array([0.00271005,0.0022586,-0.00904049])
_2dreproj1 = transform3DPcl(_3dedges,R,t,K);
newEdges = generateEdgeImg(_2dreproj1,edges.shape[:])
R = np.matrix([[0.999698, -0.0223397, 0.0102584],[0.0223036, 0.999745, 0.00361705],[-0.0103366, -0.00338715, 0.999941]])
t=np.array([0.01400186,0.0209673,-0.089684])
_2dreproj2 =transform3DPcl(_3dedges,R,t,K)
newEdges2 = generateEdgeImg(_2dreproj2,edges.shape[:])
edges_dt =cv2.distanceTransform(255-edges,distanceType=cv.CV_DIST_L2,maskSize=cv.CV_DIST_MASK_PRECISE)
plt.figure()
plt.imshow(edges_dt,cmap = plt.get_cmap('gray'))
plt.title("distance transform")
plt.figure()
plt.imshow(edges,cmap = plt.get_cmap('gray'));
plt.title('orig edges')
plt.figure()
plt.imshow(newEdges,cmap = plt.get_cmap('gray'));
plt.title('new edges')
plt.figure()
plt.imshow(newEdges2,cmap = plt.get_cmap('gray'));
plt.title('new edges2')
new_edges_dt =cv2.distanceTransform(255-newEdges,distanceType=cv.CV_DIST_L2,maskSize=cv.CV_DIST_MASK_PRECISE)
plt.figure()
plt.imshow(new_edges_dt,cmap = plt.get_cmap('gray'))
plt.title("distance transform new edges")
new_edges_dt2 =cv2.distanceTransform(255-newEdges2,distanceType=cv.CV_DIST_L2,maskSize=cv.CV_DIST_MASK_PRECISE)
plt.figure()
plt.imshow(new_edges_dt2,cmap = plt.get_cmap('gray'))
plt.title("distance transform new edges")
#evaluate against three possibilities
sumOrig = 0.0;
sumEdges1 = 0.0;
sumSelf = 0.0;
#now evaluate the distances
for xx in range(newEdges2.shape[1]):
for yy in range(newEdges2.shape[0]):
if (newEdges2[yy,xx] > 0):
sumOrig += edges_dt[yy,xx]
sumEdges1 += new_edges_dt[yy,xx]
sumSelf += new_edges_dt2[yy,xx]
print sumOrig, sumEdges1, sumSelf
#plt.figure()
#plt.imshow(abs(edges-newEdges*255),cmap = plt.get_cmap('gray'))
#plt.title('diff edges') | gpl-3.0 |
leesavide/pythonista-docs | Documentation/matplotlib/examples/old_animation/dynamic_collection.py | 9 | 1371 | import random
from matplotlib.collections import RegularPolyCollection
import matplotlib.cm as cm
from matplotlib.pyplot import figure, show
from numpy.random import rand
fig = figure()
ax = fig.add_subplot(111, xlim=(0,1), ylim=(0,1), autoscale_on=False)
ax.set_title("Press 'a' to add a point, 'd' to delete one")
# a single point
offsets = [(0.5,0.5)]
facecolors = [cm.jet(0.5)]
collection = RegularPolyCollection(
#fig.dpi,
5, # a pentagon
rotation=0,
sizes=(50,),
facecolors = facecolors,
edgecolors = 'black',
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
ax.add_collection(collection)
def onpress(event):
"""
press 'a' to add a random point from the collection, 'd' to delete one
"""
if event.key=='a':
x,y = rand(2)
color = cm.jet(rand())
offsets.append((x,y))
facecolors.append(color)
collection.set_offsets(offsets)
collection.set_facecolors(facecolors)
fig.canvas.draw()
elif event.key=='d':
N = len(offsets)
if N>0:
ind = random.randint(0,N-1)
offsets.pop(ind)
facecolors.pop(ind)
collection.set_offsets(offsets)
collection.set_facecolors(facecolors)
fig.canvas.draw()
fig.canvas.mpl_connect('key_press_event', onpress)
show()
| apache-2.0 |
meteorcloudy/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans_test.py | 16 | 21830 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.factorization.python.ops import kmeans as kmeans_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig().replace(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
steps = 10 * self.num_points // self.batch_size
kmeans.train(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.train(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
input_fn = self.input_fn(batch_size=num_points, points=points, num_epochs=1)
# Test predict
assignments = list(kmeans.predict_cluster_index(input_fn))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = list(kmeans.transform(input_fn))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) + np.transpose(
np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.cluster_centers()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, 10)
self._infer_helper(kmeans, clusters, 1)
def _parse_feature_dict_helper(self, features, parsed_feature_dict):
# Perform a sanity check.
self.assertEqual(features.shape, parsed_feature_dict.shape)
self.assertEqual(features.dtype, parsed_feature_dict.dtype)
# Then check that running the tensor yields the original list of points.
with self.test_session() as sess:
parsed_points = sess.run(parsed_feature_dict)
self.assertAllEqual(self.points, parsed_points)
def test_parse_features(self):
"""Tests the various behaviours of kmeans._parse_features_if_necessary."""
# No-op if a tensor is passed in.
features = constant_op.constant(self.points)
parsed_features = kmeans_lib._parse_features_if_necessary(features, None)
self.assertAllEqual(features, parsed_features)
# All values from a feature dict are transformed into a tensor.
feature_dict = {
'x': [[point[0]] for point in self.points],
'y': [[point[1]] for point in self.points]
}
parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
feature_dict, None)
self._parse_feature_dict_helper(features, parsed_feature_dict)
# Only the feature_columns of a feature dict are transformed into a tensor.
feature_dict_with_extras = {
'foo': 'bar',
'x': [[point[0]] for point in self.points],
'baz': {'fizz': 'buzz'},
'y': [[point[1]] for point in self.points]
}
feature_columns = [fc.numeric_column(key='x'), fc.numeric_column(key='y')]
parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
feature_dict_with_extras, feature_columns)
self._parse_feature_dict_helper(features, parsed_feature_dict)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.train(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0,
keepdims=True))[0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.train(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.cluster_centers())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = list(
self.kmeans.transform(
input_fn=self.input_fn(batch_size=self.num_points, num_epochs=1)))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
assignments = list(
self.kmeans.predict_cluster_index(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points))
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.train(
input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.cluster_centers())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_index(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=self.config(3))
tf_kmeans.train(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.cluster_centers()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None)))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
      sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.train(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
JMill/edX-Learning-From-Data-Solutions-jm | Homework_8/Python/hw8.py | 3 | 11463 | import os
import random
import numpy as np
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from sklearn.svm import SVC
from sklearn import cross_validation
#
# points_in = number of learning points
# points_out = number of test points
#
#read from file: thanks to vbipin
def read_data( filename ) :
with open(filename, "r") as f:
data = []
for line in f:
if line.strip() : #some empty lines are skipped if present.
y, x1, x2 = line.split()
data.append( [ int(float(y)), float(x1), float(x2)] ) #data will be list of lists.
return data
#eg: If you want the y and x of say 1_vs_all call
# y,x = d_vs_all( 1, data )
def d_vs_all(d, data) :
#take out the y and x from data.
y = [data[i][0] for i in range(len(data)) ] #simple list
x = [[data[i][1], data[i][2]] for i in range(len(data)) ] #list of list
#we need to put d as 1 and rest as -1 in y
y_new = []
contd = 0
for i in range(len(y)) :
if y[i] == d :
#if abs( float( y[i] ) - d ) < 0.01 :
y_new.append( 1.0 )
contd += 1
else :
y_new.append( -1.0 )
#we do not want the np array.
return y_new, x
#if you want 1 vs 5 call
# y,x = d_vs_d( 1, 5, data)
def d_vs_d(d1, d2, data) :
#take out the y and x from data.
y = [data[i][0] for i in range(len(data)) ] #simple list
x = [[data[i][1], data[i][2]] for i in range(len(data)) ] #list of list
#we need to put d as 1 and rest as -1 in y
y_new = []
x_new = []
for i in range(len(y)) :
if y[i] == d1 :
#if abs( float( y[i] ) - d1 ) < 0.01 :
y_new.append( 1.0 )
x_new.append( x[i] )
if y[i] == d2 :
#if abs( float( y[i] ) - d1 ) < 0.01 :
y_new.append( -1.0 )
x_new.append( x[i] )
return y_new, x_new
#for P2 : start = 0 stop = 10
#for P3-P4 : start = 1, stop = 11
#for P5-P10: start = 0, stop = 0
start = 0
stop = 0
filename = "/Users/danielamaurizio/Documents/EDX-courses/CS1155x/Exercises/features.train"
data = read_data(filename)
print data[:20]
points_in = len(data)
filenameout = "/Users/danielamaurizio/Documents/EDX-courses/CS1155x/Exercises/features.test"
dataout = read_data(filenameout)
print dataout[:20]
points_out = len(dataout)
bestEin = 100.0
bestNSV = 0
jbest = -1
worstEin = 0.0
worstNSV = 0
jworst = -1
#following code is for P2-4
for j in range(start, stop, 2):
coorxpos = []
coorypos = []
coorxneg = []
cooryneg = []
yn = []
x = []
yn, x = d_vs_all(j, data)
#print yn[:20]
#print x[:20]
X = np.array(x)
YN = np.array(yn)
countj = 0
for i in range(len(yn)):
if yn[i] == 1.0:
countj +=1
print j, countj
#insert code for quadratic programming SVM
# fit the model
clf = SVC(C = 0.01, kernel = 'poly', degree = 2, gamma = 1.0, coef0 = 1.0)
clf.fit(X, YN)
#print 'dual_coef_ holds the product yn_i * alpha_i'
lagcoef = list(np.array(clf.dual_coef_).reshape(-1,))
#print lagcoef
#print 'w from svc'
#print clf.coef_
print 'support vectors from SLSQP'
supvec = clf.support_vectors_
#print supvec
print 'indexes of support vectors and number of SV'
ind = clf.support_
nsv = len(ind)
print ind, nsv
alfacalc = []
#calculate alfas
for i in range(len(ind)):
m = ind[i]
alfacalc.append(-lagcoef[i]/yn[m])
#print 'alfas'
#print alfacalc
#calculate vector w and b from qp results
wvecsvm = np.zeros(shape = (1, 2))
for i in range(nsv):
wvecsvm += alfacalc[i]*yn[ind[i]]*supvec[i]
print 'calculated vectors from alfas'
wsvm = list(np.array(wvecsvm).reshape(-1,))
print wsvm
b = []
averageb = 0.0
for i in range(nsv):
temp = 1.0/yn[ind[i]] - np.matrix.item(np.dot(supvec[i],wvecsvm.T))
b.append(temp)
averageb += temp
averageb /= nsv
print 'b average'
print averageb
y_pred_in = clf.predict(X)
Ein = 0.0
for n in range(points_in):
if y_pred_in[n] <> YN[n]:
Ein += 1.0
Ein /= float(points_in)
if Ein < bestEin:
bestEin = Ein
bestNSV = nsv
jbest = j
if Ein > worstEin:
worstEin = Ein
worstNSV = nsv
jworst = j
sample_out = []
yside = []
yside, sample_out = d_vs_all(j, dataout)
for i in range(len(yside)):
if yside[i] == 1:
coorxpos.append(sample_out[i][0])
coorypos.append(sample_out[i][1])
else:
coorxneg.append(sample_out[i][0])
cooryneg.append(sample_out[i][1])
xqp_out = np.array(sample_out)
print 'out of sample data for svm test'
print xqp_out[:20], yside[:20]
y_pred = clf.predict(xqp_out)
print y_pred[:20]
Eout = 0.0
for i in range(points_out):
if y_pred[i] <> yside[i]:
Eout += 1.0
Eout /= float(points_out)
print j, Ein, Eout
figXR = plt.figure()
ax2 = figXR.add_subplot(111)
plt.plot(coorxpos, coorypos, 'b*', label = 'positive')
plt.plot(coorxneg, cooryneg, 'ro', label = 'negative')
#plt.ylim(-1.0, 1.0)
plt.xlim(-1.0, 1.0)
plt.xlabel('x')
plt.ylabel('y')
plt.title('digit ='+str(j))
plt.legend(loc=1)
plt.grid(True)
plt.draw()
#plt.show()
print 'j, NSV for best Ein'
print jbest, bestNSV, bestEin
print 'j, NSV for worst Ein'
print jworst, worstNSV, worstEin
#following code is for P5 & P6
print '\n start p5 P6 '
yn = []
x = []
#if kval = 0 no validation test
kval = 10
yn, x = d_vs_d(1, 5, data)
print yn[:10]
print yn[10:20]
print yn[20:30]
print x[:10]
print x[10:20]
print x[20:30]
if kval == 0:
X = np.array(x)
YN = np.array(yn)
count1 = 0
count5 = 0
points_in = len(yn)
for i in range(len(yn)):
if yn[i] == 1.0:
count1 += 1
else:
count5 += 1
print ' number of 1s and 5s'
print ('sample_in = ' + str(points_in))
print count1, count5
#insert code for quadratic programming SVM
# fit the model for Q= 2 and Q=5 then for C in [0.001, 0.01, 0.1, 1]
Q = [2, 5]
uplim = [0.0001, 0.001, 0.01, 0.1, 1.0]
for deg in Q:
print ('polynomial degree: ' +str(deg))
for ul in uplim:
print ('value of C: ' + str(ul))
clf = SVC(C = ul, kernel = 'poly', degree = deg, gamma = 1.0, coef0 = 1.0)
clf.fit(X, YN)
#print 'support vectors from SLSQP'
supvec = clf.support_vectors_
#print supvec
#print 'indexes of support vectors and number of SV'
ind = clf.support_
nsv = len(ind)
#print ind, nsv
y_pred_in = clf.predict(X)
Ein = 0.0
for n in range(points_in):
if y_pred_in[n] <> YN[n]:
Ein += 1.0
Ein /= float(points_in)
sample_out = []
yside = []
yside, sample_out = d_vs_d(1, 5, dataout)
xqp_out = np.array(sample_out)
points_out = len(yside)
#print 'out of sample data for svm test'
#print xqp_out[:20], yside[:20]
print ('sample_out = ' + str(points_out))
y_pred = clf.predict(xqp_out)
#print y_pred[:20]
Eout = 0.0
for i in range(points_out):
if y_pred[i] <> yside[i]:
Eout += 1.0
Eout /= float(points_out)
print nsv, Ein, Eout
else:
ntrials = 100
X = np.array(x)
YN = np.array(yn)
averageEbest = [0.0, 0.0, 0.0, 0.0, 0.0]
countbest = [0, 0, 0, 0, 0]
for i in range(ntrials):
bestEval = 1.0
bestUL = 0.0
#shuffle data
kf = cross_validation.KFold(len(YN), kval, indices=False, shuffle=True)
for train, test in kf:
X_t = X[train,:]
Y_t = YN[train]
X_v = X[test,:]
Y_v = YN[test]
#insert code for quadratic programming SVM
# fit the model for Q= 2 then for C in [0.0001, 0.001, 0.01, 0.1, 1]
Q = [2]
uplim = [0.0001, 0.001, 0.01, 0.1, 1.0]
for deg in Q:
#print ('polynomial degree: ' +str(deg))
for ul in uplim:
#print ('value of C: ' + str(ul))
clf = SVC(C = ul, kernel = 'poly', degree = deg, gamma = 1.0, coef0 = 1.0)
#cv = cross_validation.StratifiedShuffleSplit(YN, n_iter=10, test_size=0.1)
#cv = cross_validation.ShuffleSplit(len(YN), n_iter=10, test_size=0.1)
#scores = cross_validation.cross_val_score(clf, X, YN, cv=cv, scoring='accuracy')
clf.fit(X_t, Y_t)
scores = clf.score(X_v, Y_v)
Eval = 1-scores.mean()
if Eval < bestEval:
bestEval = Eval
bestUL = ul
if Eval == bestEval:
bestUL = min(bestUL, ul)
#print 'rsults for best Eval'
#print i, bestUL, bestEval
bestindex = uplim.index(bestUL)
#print 'index ' + str(bestindex)
countbest[bestindex] += 1
averageEbest[bestindex] += bestEval
for k in range(len(averageEbest)):
averageEbest[k] /= float(ntrials)
print 'results for validation run'
print countbest, max(countbest), countbest.index(max(countbest)), uplim[countbest.index(max(countbest))]
print 'average Eval ' + str(averageEbest)
#for problems 9-10
#data are from line 215
print '\n ****************************'
print 'start rbf model'
X = np.array(x)
YN = np.array(yn)
count1 = 0
count5 = 0
points_in = len(yn)
for i in range(len(yn)):
if yn[i] == 1.0:
count1 += 1
else:
count5 += 1
print ' number of 1s and 5s'
print ('sample_in = ' + str(points_in))
print count1, count5
#insert code for quadratic programming SVM
# fit the model for Q= 2 and Q=5 then for C in [0.001, 0.01, 0.1, 1]
uplim = [0.01, 1.0, 100.0, 10**4, 10**6]
for ul in uplim:
print ('value of C: ' + str(ul))
clf = SVC(C = ul, kernel = 'rbf', degree = 0, gamma = 1.0, coef0 = 1.0)
clf.fit(X, YN)
#print 'support vectors from SLSQP'
supvec = clf.support_vectors_
#print supvec
#print 'indexes of support vectors and number of SV'
ind = clf.support_
nsv = len(ind)
#print ind, nsv
y_pred_in = clf.predict(X)
Ein = 0.0
for n in range(points_in):
if y_pred_in[n] <> YN[n]:
Ein += 1.0
Ein /= float(points_in)
sample_out = []
yside = []
yside, sample_out = d_vs_d(1, 5, dataout)
xqp_out = np.array(sample_out)
points_out = len(yside)
#print 'out of sample data for svm test'
#print xqp_out[:20], yside[:20]
print ('sample_out = ' + str(points_out))
y_pred = clf.predict(xqp_out)
#print y_pred[:20]
Eout = 0.0
for i in range(points_out):
if y_pred[i] <> yside[i]:
Eout += 1.0
Eout /= float(points_out)
print nsv, Ein, Eout | apache-2.0 |
phoebe-project/phoebe2-docs | development/examples/spot_transit.py | 2 | 4056 | #!/usr/bin/env python
# coding: utf-8
# Spot Transit
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new bundle.
# In[2]:
import phoebe
import numpy as np
b = phoebe.default_binary()
# Let's set reasonable (although not necessarily physical) values for the secondary component.
# In[3]:
b.flip_constraint('mass@secondary', solve_for='q')
b.set_value(qualifier='mass', component='secondary', value=0.2)
b.set_value(qualifier='requiv', component='secondary', value=0.2)
b.set_value(qualifier='teff', component='secondary', value=300)
# We'll add a spot to the primary component.
# In[4]:
b.add_spot(component='primary',
relteff=0.90,
long=0,
colat=90,
radius=20,
feature='spot01')
# Adding Datasets
# -------------------
# In[5]:
b.add_dataset('lc', compute_times=phoebe.linspace(-0.1, 0.1, 201))
# Because we have such a cool transiting object, we'll have to use blackbody atmospheres and manually provide limb-darkening.
# In[6]:
b.set_value(qualifier='atm', component='secondary', value='blackbody')
b.set_value(qualifier='ld_mode', component='secondary', value='manual')
# In[7]:
anim_times = phoebe.linspace(-0.1, 0.1, 101)
# In[8]:
b.add_dataset('mesh', compute_times=anim_times, coordinates='uvw', columns='teffs')
# Running Compute
# --------------------
# In[9]:
b.run_compute(distortion_method='sphere', irrad_method='none')
# Plotting
# ---------------
# In[10]:
print(np.min(b.get_value('teffs', time=0.0, component='primary')), np.max(b.get_value('teffs', time=0.0, component='primary')))
# Let's go through these options (see also the [plot API docs](../api/phoebe.parameters.ParameterSet.plot.md)):
# * `time`: make the plot at this single time
# * `fc`: (will be ignored by everything but the mesh): set the facecolor to the teffs column.
# * `fcmap`: use 'plasma' colormap instead of the default to avoid whites.
# * `fclim`: set the limits on facecolor so that the much cooler transiting object doesn't drive the entire range.
# * `ec`: disable drawing the edges of the triangles in a separate color. We could also set this to 'none', but then we'd be able to "see-through" the triangle edges.
# * `tight_layout`: use matplotlib's tight layout to ensure we have enough padding between axes to see the labels.
#
# In[11]:
afig, mplfig = b.plot(time=0.0,
fc='teffs', fcmap='plasma', fclim=(5000, 6000),
ec='face',
tight_layout=True,
show=True)
# Now let's animate the same figure in time. We'll use the same arguments as the static plot above, with the following exceptions:
#
# * `times`: pass our array of times that we want the animation to loop over.
# * `consider_for_limits`: for the mesh panel, keep the primary star centered and allow the transiting object to move in and out of the frame.
# * `pad_aspect`: pad_aspect doesn't work with animations, so we'll disable to avoid the warning messages.
# * `animate`: self-explanatory.
# * `save`: we could use `show=True`, but that doesn't always play nice with jupyter notebooks
# * `save_kwargs`: may need to change these for your setup, to create a gif, passing {'writer': 'imagemagick'} is often useful.
# In[12]:
afig, mplfig = b.plot(times=anim_times,
fc='teffs', fcmap='plasma', fclim=(5000, 6000),
ec='face',
consider_for_limits={'primary': True, 'secondary': False},
tight_layout=True, pad_aspect=False,
animate=True,
save='spot_transit.gif',
save_kwargs={'writer': 'imagemagick'})
# 
# In[ ]:
| gpl-3.0 |
eg-zhang/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
alexchao56/sklearn-theano | sklearn_theano/sandbox/logistic_regression.py | 9 | 1526 | import numpy as np
from theano import tensor as T
import theano
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
rng = np.random.RandomState(1999)
X, y = make_classification(n_samples=400, n_features=25, n_informative=10,
n_classes=2, n_clusters_per_class=2,
random_state=1999)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.8)
n_samples, n_features = X_train.shape
x = T.matrix('x')
y = T.vector('y')
w = theano.shared(rng.randn(n_features), name='w')
b = theano.shared(0., name='b')
print("Initial model")
print(w.get_value(), b.get_value())
learning_rate = 0.01
reg = .1
n_iter = 10000
prob = 1 / (1 + T.exp(-T.dot(x, w) - b))
pred = prob > 0.5
loss = -y * T.log(prob) - (1 - y) * T.log(1 - prob)
# l2
# penalty = reg * (w ** 2).sum()
# l1
penalty = reg * abs(w).sum()
# l0
# penalty = reg * T.neq(w, 0).sum()
cost = loss.mean() + penalty
gw, gb = T.grad(cost, [w, b])
train = theano.function(inputs=[x, y], outputs=[pred, loss],
updates=((w, w - learning_rate * gw),
(b, b - learning_rate * gb)))
predict = theano.function(inputs=[x], outputs=pred)
for i in range(n_iter):
pred, err = train(X_train, y_train)
print("Final model:")
print(w.get_value(), b.get_value())
print("Report:")
y_pred = predict(X_test)
report = classification_report(y_test, y_pred)
print(report)
| bsd-3-clause |
nomadcube/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
golismero/golismero | thirdparty_libs/nltk/draw/dispersion.py | 17 | 1693 | # Natural Language Toolkit: Dispersion Plots
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A utility for displaying lexical dispersion.
"""
def dispersion_plot(text, words, ignore_case=False):
"""
Generate a lexical dispersion plot.
:param text: The source text
:type text: list(str) or enum(str)
:param words: The target words
:type words: list of str
:param ignore_case: flag to set if case should be ignored when searching text
:type ignore_case: bool
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab).'
'See http://matplotlib.sourceforge.net/')
text = list(text)
words.reverse()
if ignore_case:
words_to_comp = map(str.lower, words)
text_to_comp = map(str.lower, text)
else:
words_to_comp = words
text_to_comp = text
points = [(x,y) for x in range(len(text_to_comp))
for y in range(len(words_to_comp))
if text_to_comp[x] == words_to_comp[y]]
if points:
x, y = zip(*points)
else:
x = y = ()
pylab.plot(x, y, "b|", scalex=.1)
pylab.yticks(range(len(words)), words, color="b")
pylab.ylim(-1, len(words))
pylab.title("Lexical Dispersion Plot")
pylab.xlabel("Word Offset")
pylab.show()
if __name__ == '__main__':
from nltk.corpus import gutenberg
words = ['Elinor', 'Marianne', 'Edward', 'Willoughby']
dispersion_plot(gutenberg.words('austen-sense.txt'), words)
| gpl-2.0 |
liberatorqjw/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
Lawrence-Liu/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 65 | 5529 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative formula (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float64)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float64)
y.fill(0.0)
sw = np.ones(102, dtype=np.float64)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float64)
y.fill(1.0)
sw = np.ones(102, dtype=np.float64)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
SiPBA/brainSimulator | brainSimulator.py | 1 | 20182 | # -*- coding: utf-8 -*-
"""
.. module:: brainSimulator
:platform: Unix, Windows
:synopsis: Performs a simulation of functional neuroimaging based on parameters extracted from an existing dataset.
.. moduleauthor: Francisco J. Martinez-Murcia <[email protected]>
Created on Thu Apr 28 15:53:15 2016
Last update: 9 Aug, 2017
@author: Francisco J. Martinez-Murcia <[email protected]>
Copyright (C) 2017 Francisco Jesús Martínez Murcia and SiPBA Research Group
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import numbers
#Decomposition
#Good reconstruction is np.dot(Spca, pca.components_)+pca.mean_
from sklearn.decomposition import PCA, FastICA
def applyPCA(X, regularize=True, n_comp=-1):
"""
This function applies PCA decomposition to a matrix containing all subjects to be modeled.
:param X: The bidimensional array containing one image per row (conveniently vectorized)
:type X: numpy.ndarray
:param regularize: Whether or not to regularize (standardize) X. default=True.
:type regularize: bool
:param n_comp: Number of components to extract. If not specified, it will compute all available components except one.
:type n_comp: int
:returns:
* **Spca** (numpy.ndarray): Array with the PCA decomposition of X.
* **Components** (numpy.ndarray): Array with the eigenvalues of the PCA \
decomposition of X.
* **Mean** (numpy.ndarray): Vector with per-column average value.
* **Variance** (numpy.ndarray): Vector with per-column variance value.
"""
if(regularize):
mean_ = np.mean(X, axis=0)
X = X - mean_
var_ = np.var(X,axis=0)
X = X/var_
pca = PCA(n_components=n_comp)
Spca = pca.fit_transform(X)
if not regularize:
mean_ = pca.mean_
var_ = None
return Spca, pca.components_, mean_, var_
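# A minimal round-trip sketch (an illustration added here, not part of the
# original module): with regularize=True the reconstruction must undo the
# standardization using the mean and variance returned above, mirroring the
# note "np.dot(Spca, pca.components_) + pca.mean_" for the unregularized case.
def _example_pca_roundtrip(X, n_comp=10):
    """Decomposes X with applyPCA and reconstructs an approximation of it."""
    Spca, components, mean_, var_ = applyPCA(X, regularize=True, n_comp=n_comp)
    X_rec = np.dot(Spca[:, :n_comp], components[:n_comp, :])
    X_rec = X_rec * var_ + mean_  # undo the per-column standardization
    return X_rec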
def applyICA(X, regularize=True, n_comp=-1):
"""
This function applies ICA decomposition to a matrix containing all subjects to be modeled.
:param X: The bidimensional array containing one image per row (conveniently vectorized)
:type X: numpy.ndarray
:param regularize: Whether or not to regularize (standardize) X. default=True.
:type regularize: bool
:param n_comp: Number of components to extract. If not specified, it will compute all available components except one.
:type n_comp: int
:returns:
* **Spca** (numpy.ndarray): Array with the ICA decomposition of X.
* **Components** (numpy.ndarray): Array with the eigenvalues of the ICA \
decomposition of X.
* **Mean** (numpy.ndarray): Vector with per-column average value.
* **Variance** (numpy.ndarray): Vector with per-column variance value.
"""
if(regularize):
mean_ = np.mean(X, axis=0)
X = X - mean_
var_ = np.var(X,axis=0)
X = X/var_
ica = FastICA(n_components=n_comp)
Sica = ica.fit_transform(X)
if not regularize:
mean_ = ica.mean_
var_ = None
return Sica, ica.components_, mean_, var_
#os.chdir('pyStable')
#from stable import StableDist
#os.chdir('..')
class GaussianEstimator:
"""
This class generates an interface for generating random numbers according
to a per-component gaussian parametrization, estimated from the data
"""
def __init__(self, mean=0.0, var=1.0):
self.mu = mean
self.var = var
def sample(self, dimension = 1.0):
        return np.sqrt(self.var) * np.random.randn(dimension) + self.mu
def fit(self, x):
self.mu = x.mean()
self.var = x.var()
def pdf(self, x):
return (1/np.sqrt(2*self.var*np.pi))*np.exp(-np.power(x - self.mu, 2.) / (2 * self.var))
def cdf(self, x):
return np.exp(-np.power(x - self.mu, 2.) / (2 * self.var))
class MVNormalEstimator:
"""
This class creates an interface for generating random numbers according
to a given multivariate normal parametrization, estimated from the data
Works only with python 3.4+ (due to numpy matrix multiplication)
"""
def __init__(self, mean=0.0, cov=1.0):
from scipy.stats import multivariate_normal
self.mu = mean
self.cov = cov
self.model = multivariate_normal
def sample(self, dimension = 1.0):
return np.random.multivariate_normal(mean=self.mu, cov=self.cov, size=dimension)
def fit(self, x):
from sklearn.covariance import ledoit_wolf
self.mu = x.mean(axis=0)
self.cov = ledoit_wolf(x)[0] # Faster and easier
        # self.cov = ((x - x.mean(axis=0)) / x.shape[0]).T.dot(x - x.mean(axis=0))  # more complex option... worth timing?
def pdf(self, x):
return self.model.pdf(x, mean=self.mu, cov=self.cov)
def cdf(self, x):
        return self.model.cdf(x, mean=self.mu, cov=self.cov)
#Density estimation
class KDEestimator:
"""
An interface for generating random numbers according
to a given Kernel Density Estimation (KDE) parametrization based on the
data.
"""
def __init__(self, bandwidth=1.0):
from sklearn.neighbors.kde import KernelDensity
self.bandwidth = bandwidth
self.model = KernelDensity(bandwidth=self.bandwidth)
def _botev_fixed_point(self, t, M, I, a2):
# Find the largest float available for this numpy
if hasattr(np, 'float128'):
large_float = np.float128
elif hasattr(np, 'float96'):
large_float = np.float96
else:
large_float = np.float64
l = 7
I = large_float(I)
M = large_float(M)
a2 = large_float(a2)
f = 2 * np.pi ** (2 * l) * np.sum(I ** l * a2 *
np.exp(-I * np.pi ** 2 * t))
for s in range(l, 1, -1):
K0 = np.prod(np.arange(1, 2 * s, 2)) / np.sqrt(2 * np.pi)
const = (1 + (1 / 2) ** (s + 1 / 2)) / 3
time = (2 * const * K0 / M / f) ** (2 / (3 + 2 * s))
f = 2 * np.pi ** (2 * s) * \
np.sum(I ** s * a2 * np.exp(-I * np.pi ** 2 * time))
return t - (2 * M * np.sqrt(np.pi) * f) ** (-2 / 5)
def finite(self, val):
""" Checks if a value is finite or not """
return val is not None and np.isfinite(val)
def botev_bandwidth(self, data):
""" Implementation of the KDE bandwidth selection method outline in:
Z. I. Botev, J. F. Grotowski, and D. P. Kroese. *Kernel density estimation via diffusion.* The Annals of Statistics, 38(5):2916-2957, 2010.
Based on the implementation of Daniel B. Smith, PhD. The object is a callable returning the bandwidth for a 1D kernel.
Forked from the package `PyQT_fit <https://code.google.com/archive/p/pyqt-fit/>`_.
:param data: 1D array containing the data to model with a 1D KDE.
:type data: numpy.ndarray
:returns: Optimal bandwidth according to the data.
"""
from scipy import fftpack, optimize
# def __init__(self, N=None, **kword):
# if 'lower' in kword or 'upper' in kword:
# print("Warning, using 'lower' and 'upper' for botev bandwidth is "
# "deprecated. Argument is ignored")
# self.N = N
#
# def __call__(self, data):#, model):
# """
# Returns the optimal bandwidth based on the data
# """
N = 2 ** 10 #if self.N is None else int(2 ** np.ceil(np.log2(self.N)))
# lower = getattr(model, 'lower', None)
# upper = getattr(model, 'upper', None)
# if not finite(lower) or not finite(upper):
minimum = np.min(data)
maximum = np.max(data)
span = maximum - minimum
lower = minimum - span / 10 #if not finite(lower) else lower
upper = maximum + span / 10 #if not finite(upper) else upper
# Range of the data
span = upper - lower
# Histogram of the data to get a crude approximation of the density
# weights = model.weights
# if not weights.shape:
weights = None
M = len(data)
DataHist, bins = np.histogram(data, bins=N, range=(lower, upper), weights=weights)
DataHist = DataHist / M
DCTData = fftpack.dct(DataHist, norm=None)
I = np.arange(1, N, dtype=int) ** 2
SqDCTData = (DCTData[1:] / 2) ** 2
guess = 0.1
try:
t_star = optimize.brentq(self._botev_fixed_point, 0, guess,
args=(M, I, SqDCTData))
except ValueError:
t_star = .28 * N ** (-.4)
return np.sqrt(t_star) * span
def fit(self, x):
self.bandwidth = self.botev_bandwidth(x.flatten())
self.model.set_params(**{'bandwidth': self.bandwidth})
self.model.fit(x.reshape(-1,1))
def sample(self, dimension = 1.0):
return self.model.sample(dimension)
def pdf(self, x):
return self.model.score_samples(x)
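# A small usage sketch (illustrative only, not part of the original module):
# fit the 1-D KDE on a bimodal sample and draw new values from it, mirroring
# how BrainSimulator keeps one such kernel per decomposition component and class.
def _example_kde_usage(n_draws=5):
    """Fits a KDEestimator to toy data and samples from the estimated density."""
    data = np.concatenate([np.random.randn(200), 3 + 0.5 * np.random.randn(100)])
    kernel = KDEestimator()
    kernel.fit(data)  # bandwidth is chosen via botev_bandwidth
    return kernel.sample(n_draws)  # new draws from the fitted density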
class BrainSimulator:
def __init__(self, method = 'kde', algorithm='PCA', N=100, n_comp=-1, regularize=False, verbose=False):
self.method = method #PDF estimation method
self.algorithm = algorithm # algorithm used to decompose the dataset (PCA, ICA)
# self.N = N # Number of samples per class
self.n_comp = n_comp # Number of components used in the estimation
self.verbose = verbose
self.regularize = regularize # Sets regularization of decomposition via ICA or PCA.
self.kernels = None
self.uniqLabels = None
self.SCORE = None
self.COEFF = None
self.MEAN = None
self.VAR = None
def decompose(self, stack, labels):
"""
Applies PCA or ICA decomposition of the dataset.
:param stack: stack of vectorized images comprising the whole database to be decomposed
:type stack: numpy.ndarray
:param labels: labels of each subject in `stack`
:type labels: list or numpy.ndarray
:returns:
* **SCORE** - A matrix of component scores
* **COEFF** - The matrix of component loadings.
* **MEAN** - If standardized, the mean vector of all samples.
* **VAR** - If standardized, the variance of all samples.
"""
if(self.verbose):
print('Applying decomposition')
N_el = stack.shape[0]-1
if self.n_comp==-1:
self.n_comp = N_el
if self.algorithm=='PCA':
# Force to extract all components, and then extract the number of components in model
self.SCORE, self.COEFF, self.MEAN, self.VAR = applyPCA(stack, self.regularize, N_el)
elif self.algorithm=='ICA':
self.SCORE, self.COEFF, self.MEAN, self.VAR = applyICA(stack, self.regularize, self.n_comp)
return self.SCORE, self.COEFF, self.MEAN, self.VAR
def estimateDensity(self, X):
"""
Returns an estimator of the PDF of the current data.
:param X: the data from which the different kernels are fitted.
:type X: numpy.ndarray
:returns:
the trained kernel estimated for `X`
"""
        if self.method == 'kde':
            kernel = KDEestimator()
        elif self.method == 'stable':
            # kernel = StableDist(1, 1, 0, 1)
            raise NotImplementedError('stable-distribution estimation is not yet supported')
        elif self.method == 'gaussian':
            kernel = GaussianEstimator()
kernel.fit(X)
return kernel
def model(self, labels):
"""
Models the per-class distribution of scores and sets the kernels. Uses
the internally stored `SCORE` matrix, once the decomposition is applied
:param labels: labels of each subject in `stack`
:type labels: `list` or numpy.ndarray
:returns:
* **kernels** - a multivariate `kernel` or list of kernels, \
depending on the model.
* **uniqLabels** - unique labels used to create a standard object.
"""
if(self.verbose):
print('Creating Density Matrices')
self.kernels = []
        # uniqLabels is key. Here the different classes (either numbers or str)
        # are held in different positions (ordered numerically or alphabetically)
        # and the kernels will be saved in that very order.
self.uniqLabels = list(set(labels))
for idx,lab in enumerate(self.uniqLabels):
            if self.method == 'mvnormal':
kernel = MVNormalEstimator()
kernel.fit(self.SCORE[labels==lab,:self.n_comp])
self.kernels.append(kernel)
else:
self.kernels.append([])
for el in self.SCORE[:,:self.n_comp].T: # per column
self.kernels[idx].append(self.estimateDensity(el[labels==lab]))
return self.kernels, self.uniqLabels
def fit(self, stack, labels):
"""
Performs the fitting of the model, in order to draw samples afterwards.
It applies the functions `self.decompose` and `self.model`
:param stack: stack of vectorized images comprising the whole database to be decomposed
:type stack: numpy.ndarray
:param labels: labels of each subject in `stack`
:type labels: list or numpy.ndarray
"""
labels = labels.astype(int)
# selection = np.array([x in self.classes for x in labels])
# stack_fin = stack[selection,:]
# labels_fin = labels[selection]
self.decompose(stack, labels)
self.model(labels)
def is_fitted(self):
"""
Returns true if the model has been fitted and is ready for use.
"""
checkVar = True
if self.kernels is None:
checkVar = False
return checkVar
def createNewBrains(self, N, kernel, components=None):
"""
Generates new samples in the eigenbrain space and projects back to
the image space for a given kernel and a specified number of
components.
:param N: Number of samples to draw from that class
:type N: integer
:param kernel: kernel or list of kernels to generate new samples
:type kernel: `KDEestimator`, `MVNormalEstimator` or \
`GaussianEstimator`
:param components: Number of components to be used in the \
reconstruction of the images.
:type components: int
:returns: **simStack** - a `stack` or numpy.ndarray containing `N` \
vectorized images in rows.
"""
import warnings
if components is None:
components = self.n_comp
elif isinstance(components, numbers.Number):
if components> self.n_comp:
warnings.warn("The model used less components than specified. Using default n_comp="+str(self.n_comp))
components = self.n_comp
else:
raise ValueError('n_comp should be a number or None')
if not isinstance(kernel, list):
newS = kernel.sample(N)
else:
newS = np.zeros((int(N), components))
for i in range(components):
k = kernel[i]
newS[:,i] = k.sample(N).flatten()
simStack = np.dot(newS[:,:components], self.COEFF[:components,:])
if self.VAR is not None:
simStack = simStack*self.VAR
simStack = simStack + self.MEAN
return simStack
def sample(self, N, clas=0, n_comp=None):
"""
Standard method that draws samples from the model.
:param N: number of samples to be generated for each class.
:type N: integer
:param clas: class (according to `self.uniqLabels`) of the images to \
be generated.
:type clas: integer
:param n_comp: Number of components to be used in the \
reconstruction of the images.
:type n_comp: int
:returns:
* **labels** - numpy.ndarray vector with `N` labels of `clas`
* **stack** - a `stack` or numpy.ndarray containing `N` \
vectorized images of clas `clas` in rows.
"""
if(self.verbose):
print('Creating brains with class %d'%clas)
stackaux = self.createNewBrains(N, self.kernels[self.uniqLabels.index(clas)], n_comp)
labelsaux = np.array([int(clas)]*N)
return labelsaux, stackaux
def generateDataset(self, stack=None, labels=None, N=100, classes=None, components=None):
"""
Fits the model and generates a new set of N elements for each class
specified in "classes".
:param stack: the stack from which the model will be created
:type stack: numpy.ndarray
:param labels: a vector containing the labels of the stacked dataset
:type labels: numpy.ndarray
:param N: the number of elements (per class) to be generated
:type N: either int (the same N will be generated per class) or a list\
of the same length as `classes` containing the number of subjects to \
be generated for each class respectively.
:param classes: the classes that we aim to generate
:type classes: a list of the classes to be generated, e.g.: `[0, 2]` \
or `['AD', 'CTL']`.
:param components: the number of components used in the synthesis. \
This parameter is only valid if `components` here is smaller than the\
`n_comp` specified when creating and fitting the `BrainSimulator`\
object.
:type components: integer
:returns:
* **labels** - numpy.ndarray vector with labels for `stack`
* **stack** - a `stack` or numpy.ndarray containing all synthetic \
images (N per clas `clas`) in rows.
"""
# If the model has not been fitted, fit it.
if not self.is_fitted():
if self.verbose:
print('Fitting the model')
self.fit(stack, labels)
# Classes input must correspond to the same numbers as labels
        if classes is None:
clasdef = self.uniqLabels
else:
if (isinstance(classes[0], numbers.Number) and isinstance(self.uniqLabels[0], numbers.Number)) or (type(classes[0]) is type(self.uniqLabels[0])):
# self.classes = []
clasdef = classes
# for el in classes:
# if el in self.uniqLabels:
# clasdef.append(self.uniqLabels.index(el))
# else:
# print('Error: specified class has not been modeled')
else:
print('Error: class not correctly specified')
for ix, clas in enumerate(clasdef):
if type(N) is list:
labelsaux, stackaux = self.sample(N[ix], clas, components)
else:
labelsaux, stackaux = self.sample(N, clas, components)
if 'finStack' not in locals():
labels, finStack = labelsaux, stackaux
else:
finStack = np.vstack((finStack, stackaux))
labels = np.hstack((labels, labelsaux))
finStack[finStack<0]=0.
return labels, finStack
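# End-to-end usage sketch (hypothetical data; it illustrates the public workflow
# rather than a validated experiment): decompose a stack of vectorized images,
# model the per-class score densities, and synthesize new subjects per class.
if __name__ == '__main__':
    stack = np.abs(np.random.randn(40, 500))  # 40 fake vectorized images
    labels = np.array([0] * 20 + [1] * 20)  # two diagnostic classes
    sim = BrainSimulator(method='kde', algorithm='PCA', n_comp=20, verbose=True)
    new_labels, new_stack = sim.generateDataset(stack, labels, N=50, classes=[0, 1])
    print(new_stack.shape, new_labels.shape)  # (100, 500) (100,)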
| gpl-3.0 |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 8 | 21806 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
if batch_size is None:
batch_size = x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
# Skip first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes is not None and n_classes > 1:
output_shape = [batch_size] + y_shape + [n_classes]
else:
output_shape = [batch_size] + y_shape
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(
x, y, n_classes, batch_size=None, shuffle=True, epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or iterable.
y: numpy, pandas or Dask array or iterable.
n_classes: number of classes.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
chunk = []
for data in x:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or iterable.
batch_size: Size of batches to split data into.
If `None`, returns one batch of full size.
Returns:
List or iterator of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(
self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: Feature Nd numpy matrix of shape `[n_samples, n_features, ...]`.
y: Target vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence
of targets. Can be `None` for unsupervised setting.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion.
batch_size: Mini-batch size to accumulate.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features.
y: Input target.
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input.
output_shape: Shape of the output.
input_dtype: DType of input.
output_dtype: DType of output.
"""
self._x = check_array(x, dtype=x.dtype)
# self.n_classes is None means we're passing in raw target indices.
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
if n_classes is not None:
self._y = (None if y is None else check_array(y, dtype=y_dtype))
elif isinstance(y, list):
self._y = np.array(y)
else:
self._y = y
self.n_classes = n_classes
self.max_epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
self._x.shape, None if self._y is None else self._y.shape, n_classes,
batch_size)
# Input dtype matches dtype of x.
self._input_dtype = _check_dtype(self._x.dtype)
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None or self._y is None:
self._output_dtype = np.float32
else:
self._output_dtype = _check_dtype(self._y.dtype)
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if self._shuffle:
self.indices = self.random_state.permutation(self._x.shape[0])
else:
self.indices = np.array(range(self._x.shape[0]))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(dtypes.int32, [1],
name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
input_shape = [None] + self.input_shape[1:]
self._input_placeholder = array_ops.placeholder(
dtypes.as_dtype(self._input_dtype),
input_shape,
name='input')
if self.output_shape is None:
self._output_placeholder = None
else:
output_shape = [None] + self.output_shape[1:]
self._output_placeholder = array_ops.placeholder(
dtypes.as_dtype(self._output_dtype),
output_shape,
name='output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be None.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
end = min(self._x.shape[0], self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# Assign input features from random indices.
inp = (
np.array(_access(self._x, batch_indices)).reshape(
(batch_indices.shape[0], 1))
if len(self._x.shape) == 1 else _access(self._x, batch_indices))
feed_dict[self._input_placeholder.name] = inp
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= self._x.shape[0]:
self.indices = self.random_state.permutation(self._x.shape[0])
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# assign labels from random indices
self.output_shape[0] = batch_indices.shape[0]
out = np.zeros(self.output_shape, dtype=self._output_dtype)
for i in xrange(out.shape[0]):
sample = batch_indices[i]
# self.n_classes is None means we're passing in raw target indices
if self.n_classes is None:
out[i] = _access(self._y, sample)
else:
if self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, int(_access(self._y, sample))), 1.0)
else:
for idx, value in enumerate(_access(self._y, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(self._y, sample)
feed_dict[self._output_placeholder.name] = out
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes from disk or
  somewhere else. It is customary to have these iterators rotate infinitely
  over the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
Attributes:
x: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
[1] + list(x_first_el.shape),
[1] + list(y_first_el.shape) if y is not None else None,
n_classes,
batch_size)
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and n_classes > 0:
self._output_dtype = np.float32
elif y is not None:
if isinstance(y_first_el, list) or isinstance(y_first_el, np.ndarray):
self._output_dtype = _check_dtype(np.dtype(type(y_first_el[0])))
else:
self._output_dtype = _check_dtype(np.dtype(type(y_first_el)))
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
Dict of input and output tensors.
"""
if self.stopped:
raise StopIteration
inp = np.zeros(self.input_shape, dtype=self._input_dtype)
if self._y is not None:
out = np.zeros(self.output_shape, dtype=self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
inp[i, :] = six.next(self._x)
except StopIteration:
self.stopped = True
inp = inp[:i, :]
if self._y is not None:
out = out[:i]
break
if self._y is not None:
y = six.next(self._y)
if self.n_classes is not None and self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, y), 1.0)
else:
for idx, value in enumerate(y):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = y
if self._y is None:
return {self._input_placeholder.name: inp}
return {self._input_placeholder.name: inp,
self._output_placeholder.name: out}
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self, x, y, n_classes, batch_size, shuffle=True,
random_state=None, epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: dask.DataFrame or dask.Series of input features.
      y: dask.DataFrame or dask.Series of targets (1 or many classes /
        regression values).
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate so use a
int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output targets.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp,
output_placeholder.name: encoded_out}
return _feed_dict_fn
| mit |
XianliangJ/collections | QJump/qjump-nsdi15-plotting/figure9/plot_ns2_flow_completion.py | 2 | 5695 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Malte Schwarzkopf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the project, the name of copyright holder nor the names
# of its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import sys, re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from utils import *
from matplotlib import pylab
if len(sys.argv) < 3:
print("usage: plot_ns2_flow_completion.py <input file> " \
"<plot_type: mean|99th> <workload: search|learning> " \
"<region: 1|2|3> <label> ... <label> <output file>")
sys.exit(1)
"""python plot_ns2_flow_completion.py baseline_largeinitcwnd_log.tr 0 tcp dctcp pdq pfabric ideal qj output.pdf"""
paper_mode = True
if paper_mode:
fig = plt.figure(figsize=(1.9,1.264))
set_paper_rcs()
else:
fig = plt.figure()
set_rcs()
#rc('figure.subplot', left=0.16, top=0.80, bottom=0.18, right=0.84)
colours = ['b', 'r', 'g', 'm', 'c', 'y']
input_file = open(sys.argv[1], 'r')
plot_type = sys.argv[2]
plot_workload = sys.argv[3]
plot_region = int(sys.argv[4])
plot_labels = []
for i in range(5, len(sys.argv) - 1):
plot_labels.append(sys.argv[i])
outname = sys.argv[-1]
labels = []
nrm_fct_99 = [[]]
nrm_fct_mean = [[]]
cur_label = None
prev_label = None
for line in input_file:
m = re.match("([a-zA-Z0-9]+)\.([a-zA-Z]+)\.([0-9]\.[0-9])\.?([a-zA-Z0-9]*)\.tr " \
"region([0-9]): 99% ([0-9\.]+) mean ([0-9\.]+).*", line)
if m:
fields = [x.strip() for x in line.split()]
cur_label = m.group(1)
cur_label += m.group(4)
cur_workload = m.group(2)
if cur_workload != plot_workload:
continue
region = int(m.group(5))
if region != plot_region:
continue
if not cur_label in plot_labels:
continue
if not prev_label:
labels.append(cur_label)
prev_label = cur_label
if cur_label != prev_label:
labels.append(cur_label)
nrm_fct_99.append([])
nrm_fct_mean.append([])
prev_label = cur_label
cur_load = m.group(3)
percentile99 = float(m.group(6))
mean = float(m.group(7))
nrm_fct_99[-1].append(percentile99)
nrm_fct_mean[-1].append(mean)
input_file.close()
if plot_type == '99th':
print "%s: REGION %d -- 99th PERCENTILE:" % (outname, plot_region)
print nrm_fct_99
elif plot_type == 'mean':
print "%s: REGION %d -- MEAN:" % (outname, plot_region)
print nrm_fct_mean
x_labels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
num_label = 0
markers = {'baseline':'o', 'dctcp':'+', 'pdq':'v', 'pfabric':'x', 'ideal':'s',
'qj':'D', }
labels_to_names = {'baseline':'TCP', 'dctcp':'DCTCP', 'pdq':'PDQ',
'pfabric':'pFabric', 'ideal':'Ideal', 'qj':'QJump'}
#markers = ['o', 'v', '^', '+', 'x', 'D']
if plot_type == 'mean':
for i in range(0, len(labels)):
plt.plot(x_labels, nrm_fct_mean[i], label=labels_to_names[labels[i]],
color=colours[i], lw=1.0, marker=markers[labels[i]], mew=1.0, mfc='none',
mec=colours[i], ms=4)
elif plot_type == '99th':
for i in range(0, len(labels)):
plt.plot(x_labels, nrm_fct_99[i], label=labels_to_names[labels[i]],
color=colours[i], lw=1.0, marker=markers[labels[i]], mew=1.0, mfc='none',
mec=colours[i], ms=4)
else:
print "Unknown plot type specified!"
sys.exit(1)
#plt.ylim(1, 10)
plt.xlim(0.1, 0.8)
#ax1.set_yticks([0, 2, 4, 6, 8, 10])
#ax1.set_yticklabels(['0', '2', '4', '6', '8', '10'])
plt.yscale('log')
plt.ylim(1, 35)
plt.yticks([1, 2, 5, 10, 20], ['1', '2', '5', '10', '20'])
plt.xticks([0.2, 0.4, 0.6, 0.8], ['0.2', '0.4', '0.6', '0.8'])
plt.xlabel("Load")
plt.ylabel("Normalized FCT [$\log_{10}$]")
#labs = [l.get_label() for l in lns]
#ax1.legend(lns, labs, bbox_to_anchor=(-0.2, 1.02, 1.2, .102), loc=3,
# ncol=3, mode="expand", frameon=True, borderaxespad=0.,
# handletextpad=0.2)
#leg = ax1.get_legend()
#frame = leg.get_frame()
#frame.set_alpha(0.0)
if plot_region == 1 and plot_type == 'mean':
plt.legend(ncol=2, frameon=False, loc=2, borderaxespad=0.1,
handletextpad=0.2, columnspacing=0.4)
#plt.figtext(0.01, 0.82, "Latency", rotation='90')
plt.axhline(1, ls=':', color='k')
plt.savefig("%s.pdf" % outname, format="pdf", bbox_inches='tight',
pad_inches=0.04)
| gpl-3.0 |
kustomzone/augur-core | tests/fixedpoint_tests/optimal_interp.py | 4 | 9817 | #!/usr/bin/python2
import gmpy2
#import matplotlib.pyplot as plt
import os
import bisect
from numpy import linspace
# gmpy2 precision initialization
BITS = (1 << 10)
BYTES = BITS/8
gmpy2.get_context().precision = BITS # a whole lotta bits
def random():
seed = int(os.urandom(BYTES).encode('hex'), 16)
return gmpy2.mpfr_random(gmpy2.random_state(seed))
# Useful constants as mpfr
PI = gmpy2.acos(-1)
LOG2E = gmpy2.log2(gmpy2.exp(1))
LN2 = gmpy2.log(2)
# Same, as 192.64 fixedpoint
FX_PI = int(PI * 2**64)
FX_LOG2E = int(LOG2E * 2**64)
FX_LN2 = int(LN2 * 2**64)
FX_ONE = 1 << 64
## The index of a poly is the power of x,
## the val at the index is the coefficient.
##
## An nth degree poly is a list of len n + 1.
##
## The vals in a poly must all be floating point
## numbers.
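##
## For example (illustration added here, not part of the original comment),
## [2.0, 0.0, 3.0] encodes the polynomial 2 + 3x^2.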
def poly_add(p1, p2):
if p1 == []:
return p2
if p2 == []:
return p1
return [p1[0]+p2[0]] + poly_add(p1[1:], p2[1:])
def poly_mul(p1, p2):
new_len = len(p1) + len(p2) - 1
new_p = [0]*new_len
for i, a_i in enumerate(p1):
for j, a_j in enumerate(p2):
new_p[i+j] += a_i*a_j
return new_p
def scalar_mul(s, p):
return [s*a_i for a_i in p]
def scalar_div(p, s):
return [a_i/s for a_i in p]
def lagrange_basis_denom(j, xs):
result = 1
x_j = xs[j]
for m, x_m in enumerate(xs):
if m!=j:
result *= x_j - x_m
return result
def lagrange_basis_numer(j, xs):
result = [1]
for m, x_m in enumerate(xs):
if m!=j:
result = poly_mul(result, [-x_m, 1])
return result
def lagrange_basis(j, xs):
return scalar_div(lagrange_basis_numer(j, xs),
lagrange_basis_denom(j, xs))
def lagrange_interp(xs, ys):
result = []
for j, y_j in enumerate(ys):
result = poly_add(result,
scalar_mul(y_j,
lagrange_basis(j, xs)))
return result
def chebyshev_nodes(n, a, b):
nodes = []
for k in range(1, n + 1):
x_k = ((a + b) + (b - a)*gmpy2.cos((2*k - 1)*PI/2/n))/2
nodes.append(x_k)
return nodes
def optimal_interp(func, n, a, b):
xs = chebyshev_nodes(n, a, b)
ys = map(func, xs)
return lagrange_interp(xs, ys)
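# Illustrative sketch added by the editor (not part of the original file):
# this is how the interpolants are consumed further below, e.g. in
# test_interps_random and generate_serpent.  The returned list holds float
# coefficients, lowest degree first, of the degree n-1 Lagrange interpolant
# through n Chebyshev nodes on [a, b].
def _example_log2_interpolant(n=15):
  return optimal_interp(gmpy2.log2, n, 1, 2)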
## fixedpoint functions, designed with 192.64 format in mind,
## though they should work with any n.64 format.
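## E.g. the real number 1.5 is stored as the integer 1.5 * 2**64 (== 3 << 63),
## and FX_ONE == 1 << 64 represents 1.0 (illustration added by the editor).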
def make_fx_poly(poly):
return [int(a_i*2**64) for a_i in poly]
def fx_poly_eval(p, x):
result = p[0]
temp = x
for a_i in p[1:]:
result += a_i*temp >> 64
temp = temp*x >> 64
return result
def fx_max_random_error(fx_func, ref_func, trials, a, b):
max_err = 0
for i in range(trials):
random_input = random()*(b - a) + a
expected = ref_func(random_input) * 2**64
result = fx_func(int(random_input * 2**64))
err = abs(result - expected)/expected
if err > max_err:
max_err = err
  return max_err*100
def fx_relative_random_error(fx_func, ref_func, trials, a, b):
errors = []
min_err, max_err = float('inf'), float('-inf')
for i in range(trials):
random_input = random()*(b - a) + a
expected = ref_func(random_input) * 2**64
result = fx_func(int(random_input * 2**64))
diff = result - expected
if diff < min_err:
min_err = diff
if max_err < diff:
max_err = diff
bisect.insort(errors, diff*100/expected)
return sum(map(abs, errors[trials/4:3*trials/4]))*4/trials, min(errors), max(errors), errors[len(errors)/2], max_err, min_err
def fx_floor_log2(x):
y = x >> 64
lo = 0
hi = 191
mid = (lo + hi) >> 1
while lo + 1 != hi:
if (1 << mid) > y:
hi = mid
else:
lo = mid
mid = (lo + hi) >> 1
return lo
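# (illustration added by the editor) e.g. fx_floor_log2(3 << 64) == 1, since
# the integer part of the fixedpoint argument is 3 and floor(log2(3)) == 1.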
def fx_log2(x, log2_poly):
y = fx_floor_log2(x)
z = x >> y # z = x/2^y
return (y << 64) + fx_poly_eval(log2_poly, z)
def fx_log(x, log2_poly):
return (fx_log2(x, log2_poly) << 64) / FX_LOG2E
def fx_exp2(x, exp2_poly):
y = x >> 64
z = x % (1 << 64)
return fx_poly_eval(exp2_poly, z) << y
def fx_exp(x, exp2_poly):
return fx_exp2((x << 64)/FX_LN2, exp2_poly)
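# (note added by the editor) fx_exp and fx_log reduce to the base-2 kernels
# via exp(x) = 2**(x / ln 2) and log(x) = log2(x) / log2(e), so the
# interpolated polynomials only ever see arguments in [0, 1) and [1, 2).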
## The tests combine the fixedpoint functions
## with the interpolation functions above, to
## test several different interpolations.
def test_interps_random(trials, *range_args):
log_min, log_max = 1, gmpy2.exp(128)
exp_min, exp_max = 0, 128
datastr = ',\n\t\t'.join([
'avg_abs_mid_50:%E%%', 'min_rel:%E%%',
'max_rel:%E%%', 'median_rel:%E%%',
'max_diff:%E%%', 'min_diff:%E%%'
])
errstr = '\terror in fx_log:\n\t\t'
errstr += datastr
errstr += '\n\terror in fx_exp:\n\t\t'
errstr += datastr
for i in range(*range_args):
log2_poly = make_fx_poly(optimal_interp(gmpy2.log2, i, 1, 2))
exp2_poly = make_fx_poly(optimal_interp(gmpy2.exp2, i, 0, 1))
logf = lambda x: fx_log(x, log2_poly)
expf = lambda x: fx_exp(x, exp2_poly)
max_log_err = fx_relative_random_error(logf, gmpy2.log, trials, log_min, log_max)
max_exp_err = fx_relative_random_error(expf, gmpy2.exp, trials, exp_min, exp_max)
errs = max_log_err + max_exp_err
print "Relative error using %d Chebyshev nodes:" % i
print errstr % errs
def graph_errors(*range_args):
exp_min, exp_max = 0, 4
exp_xs = map(gmpy2.mpfr, linspace(exp_min, exp_max, 10000))
exp_ys = map(gmpy2.exp, exp_xs)
log_min, log_max = 1, gmpy2.exp(exp_max)
log_xs = map(gmpy2.mpfr, linspace(log_min, log_max, 10000))
log_ys = map(gmpy2.log, log_xs)
funcs = [
(fx_exp, gmpy2.exp2, 0, 1, exp_xs, exp_ys, 'exp'),
(fx_log, gmpy2.log2, 1, 2, log_xs, log_ys, 'log'),
]
for i in range(*range_args):
for func_items in funcs:
fx_func, interp_func = func_items[:2]
interp_min, interp_max = func_items[2:4]
ref_xs, ref_ys = func_items[4:6]
name = func_items[6]
p_i = make_fx_poly(
optimal_interp(
interp_func,
i,
interp_min,
interp_max
)
)
fx_f = lambda x: fx_func(int(x * 2**64), p_i)/gmpy2.mpfr(1 << 64)
fx_ys = map(fx_f, ref_xs)
first_diff = map(lambda a, b: b - a, fx_ys[:-1], fx_ys[1:])
fig, axes = plt.subplots(3, sharex=True)
axes[0].set_title('$\\%s(x)$ and $\\%s_{fx}(x)$' % (name, name))
axes[0].plot(ref_xs, ref_ys, label=('$\\%s$' % name))
axes[0].plot(ref_xs, fx_ys, label=('$\\%s_{fx}$' % name))
axes[1].set_title('$(\\%s_{fx} - \\%s)(x)$' % (name, name))
axes[1].plot(ref_xs, map(lambda a, b: a-b, fx_ys, ref_ys))
axes[2].set_title('$\\frac{d}{dx}(\\%s_{fx})$' % name)
axes[2].plot(ref_xs[:-1], first_diff)
fig.savefig('chebyshev-%s-%d.png'%(name, i))
if any(map(lambda a: 1 if a < 0 else 0, first_diff)):
print "\033[1;31mBAD FIRST DIFF!!!!! fx_%s with %d nodes\033[0m" % (name, i)
def generate_serpent(*range_args):
exp_code = '''\
macro fx_exp2_small($x):
with $result = %s0x{poly[0]:X}:
with $temp = $x:
{interp_code}
macro fx_exp2($x):
with $y = $x / 0x{FX_ONE:X}:
with $z = $x %% 0x{FX_ONE:X}:
fx_exp2_small($z) * 2**y
macro fx_exp($x):
fx_exp2($x * 0x{FX_ONE:X} / 0x{FX_LN2:X})
'''
log_code = '''
macro fx_floor_log2($x):
with $y = $x / 0x{FX_ONE:X}:
with $lo = 0:
with $hi = 191:
with $mid = ($hi + $lo)/2:
while ($lo + 1) != $hi:
if $y < 2**$mid:
$hi = $mid
else:
$lo = $mid
$mid = ($hi + $lo)/2
$lo
macro fx_log2_small($x):
with $result = %s0x{poly[0]:X}:
with $temp = $x:
{interp_code}
macro fx_log2($x):
with $y = fx_floor_log2($x):
with $z = $x / 2**$y:
$y * 0x{FX_ONE:X} + fx_log2_small($z)
macro fx_log($x):
fx_log2($x) * 0x{FX_ONE:X} / 0x{FX_LOG2E:X}
'''
code_items = [
(exp_code, gmpy2.exp2, 0, 1),
(log_code, gmpy2.log2, 1, 2),
]
tab = ' '*12
for i in range(*range_args):
full_code = ''
for code, ref_func, a, b in code_items:
poly = make_fx_poly(optimal_interp(ref_func, i, a, b))
interp_code = ''
for j, a_j in enumerate(poly[1:-1]):
piece = '$result %%s= 0x{poly[%d]:X}*$temp / 0x{FX_ONE:X}' % (j + 1)
if a_j > 0:
interp_code += piece % '+'
else:
interp_code += piece % '-'
interp_code += '\n' + tab
interp_code += '$temp = $temp*$x / 0x{FX_ONE:X}'
interp_code += '\n' + tab
if poly[0] > 0:
this_code = code % '+'
else:
this_code = code % '-'
if poly[-1] > 0:
interp_code += '$result + 0x{poly[%d]:X}*$temp / 0x{FX_ONE:X}' % (len(poly) - 1)
else:
interp_code += '$result - 0x{poly[%d]:X}*$temp / 0x{FX_ONE:X}' % (len(poly) - 1)
poly = map(abs, poly)
fmt_args = globals().copy()
fmt_args.update(locals())
this_code = this_code.format(**fmt_args).format(**fmt_args)
full_code += this_code
f = open('fx_macros_%d.se'%i, 'w')
f.write(full_code)
f.close()
if __name__ == '__main__':
# graph_errors(15, 21)
generate_serpent(15, 21)
| gpl-3.0 |
qiime2-plugins/normalize | q2_feature_table/tests/test_rename.py | 3 | 5279 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import warnings
import biom
import qiime2
import pandas as pd
import numpy as np
import numpy.testing as npt
from q2_feature_table import _rename
class TestRename(unittest.TestCase):
def setUp(self):
self.old_ids = ['S1', 'S2', 'S3']
self.name_map = pd.Series({'S1': 'S1_new',
'S2': 'S2_new',
'S4': 'S4_name'})
self.known = {'S1': 'S1_new', 'S2': 'S2_new', 'S3': 'S3'}
def test_generate_new_names_non_unique(self):
name_map = pd.Series({'S1': 'S2_new', 'S2': 'S2_new'})
with self.assertRaises(ValueError) as cm:
_rename._generate_new_names(self.old_ids,
name_map,
strict=True,
verbose=False)
self.assertEqual(
str(cm.exception),
('All new ids must be unique.\n'
'Try the group method in this plugin if you want '
'to combine multiple samples in the same table.')
)
def test_generate_new_names_old_disjoint_strict(self):
with self.assertRaises(ValueError) as cm:
_rename._generate_new_names(self.old_ids,
self.name_map,
strict=True,
verbose=False)
self.assertEqual(
str(cm.exception),
("There are ids in the table which do not have new names.\n"
"Either turn off strict mode or provide a remapping for "
"all ids.\nThe following ids are not mapped:\n S3")
)
def test_generate_new_names_verbose_warnings(self):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
new_names = \
_rename._generate_new_names(self.old_ids,
self.name_map,
strict=False,
verbose=True)
self.assertEqual(len(w), 2)
self.assertTrue(isinstance(w[0].message, UserWarning))
self.assertEqual(str(w[0].message),
'There are ids in the original table which do not '
'have new names.\nThe following ids will not be '
'included:\n S3')
self.assertTrue(isinstance(w[1].message, UserWarning))
self.assertEqual(str(w[1].message),
'There are ids supplied for renaming that are not in'
' the table.\nThe following ids will not be mapped:'
'\n S4'
)
self.assertEqual(new_names.keys(), self.known.keys())
for k, v in new_names.items():
self.assertEqual(v, self.known[k])
    def test_generate_new_names_no_verbose(self):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
new_names = \
_rename._generate_new_names(self.old_ids,
self.name_map,
strict=False,
verbose=False)
self.assertEqual(len(w), 0)
self.assertEqual(new_names.keys(), self.known.keys())
for k, v in new_names.items():
self.assertEqual(v, self.known[k])
def test_rename_samples(self):
table = biom.Table(np.array([[0, 1, 2], [3, 4, 5]]),
observation_ids=['01', '02'],
sample_ids=['S1', 'S2', 'S3'])
meta1 = qiime2.Metadata(pd.DataFrame(
data=np.array([['cat'], ['rat'], ['dog']]),
index=pd.Index(['S1', 'S2', 'S3'], name='sample-id'),
columns=['animal']
))
meta2 = qiime2.Metadata(pd.DataFrame(
data=[['CATCATCAT'], ['WANTCAT']],
index=pd.Index(['01', '02'], name='feature-id'),
columns=['sequence']
))
updated = _rename.rename_ids(table,
meta1.get_column('animal'))
updated = _rename.rename_ids(updated,
meta2.get_column('sequence'),
axis='feature')
npt.assert_array_equal(np.array(updated.ids(axis='sample')),
np.array(['cat', 'rat', 'dog']))
npt.assert_array_equal(np.array(updated.ids(axis='observation')),
np.array(['CATCATCAT', 'WANTCAT']))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
gef756/statsmodels | statsmodels/tsa/statespace/sarimax.py | 6 | 82403 | """
SARIMAX Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
import pandas as pd
from .kalman_filter import KalmanFilter, FilterResults
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible, constrain_stationary_univariate,
unconstrain_stationary_univariate
)
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.wrapper as wrap
class SARIMAX(MLEModel):
r"""
Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors
model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable or iterable of iterables, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. `d` must be an integer
indicating the integration order of the process, while
        `p` and `q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is
an AR(1) model: (1,0,0).
seasonal_order : iterable, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity.
        `D` must be an integer indicating the integration order of the process,
        while `P` and `Q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. `s` is an
integer giving the periodicity (number of periods in season), often it
is 4 for quarterly data or 12 for monthly data. Default is no seasonal
effect.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend polynomial :math:`A(t)`.
Can be specified as a string where 'c' indicates a constant (i.e. a
degree zero component of the trend polynomial), 't' indicates a
linear trend with time, and 'ct' is both. Can also be specified as an
iterable defining the polynomial as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not
include a trend component.
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
time_varying_regression : boolean, optional
        Used when exogenous variables, `exog`, are provided, to select
        whether or not coefficients on the exogenous regressors are
allowed to vary over time. Default is False.
mle_regression : boolean, optional
        Whether or not to estimate the regression coefficients for the
exogenous variables as part of maximum likelihood estimation or through
the Kalman filter (i.e. recursive least squares). If
`time_varying_regression` is True, this must be set to False. Default
is True.
simple_differencing : boolean, optional
Whether or not to use partially conditional maximum likelihood
estimation. If True, differencing is performed prior to estimation,
        which discards the first :math:`s D + d` initial rows but results in a
smaller state-space formulation. If False, the full SARIMAX model is
put in state-space form so that all datapoints can be used in
estimation. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
hamilton_representation : boolean, optional
Whether or not to use the Hamilton representation of an ARMA process
(if True) or the Harvey representation (if False). Default is False.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
measurement_error : boolean
Whether or not to assume the endogenous
observations `endog` were measured with error.
state_error : boolean
Whether or not the transition equation has an error component.
mle_regression : boolean
Whether or not the regression coefficients for
the exogenous variables were estimated via maximum
likelihood estimation.
state_regression : boolean
Whether or not the regression coefficients for
the exogenous variables are included as elements
of the state space and estimated via the Kalman
filter.
time_varying_regression : boolean
Whether or not coefficients on the exogenous
regressors are allowed to vary over time.
simple_differencing : boolean
Whether or not to use partially conditional maximum likelihood
estimation.
enforce_stationarity : boolean
Whether or not to transform the AR parameters
to enforce stationarity in the autoregressive
component of the model.
enforce_invertibility : boolean
Whether or not to transform the MA parameters
to enforce invertibility in the moving average
component of the model.
hamilton_representation : boolean
Whether or not to use the Hamilton representation of an ARMA process.
trend : str{'n','c','t','ct'} or iterable
Parameter controlling the deterministic
trend polynomial :math:`A(t)`. See the class
parameter documentation for more information.
polynomial_ar : array
Array containing autoregressive lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients,
ordered from lowest degree to highest. Initialized
with ones, unless a coefficient is constrained to be
zero (in which case it is zero).
k_ar : int
Highest autoregressive order in the model, zero-indexed.
k_ar_params : int
Number of autoregressive parameters to be estimated.
k_diff : int
        Order of integration.
k_ma : int
Highest moving average order in the model, zero-indexed.
k_ma_params : int
Number of moving average parameters to be estimated.
k_seasons : int
Number of periods in a season.
k_seasonal_ar : int
Highest seasonal autoregressive order in the model, zero-indexed.
k_seasonal_ar_params : int
Number of seasonal autoregressive parameters to be estimated.
k_seasonal_diff : int
        Order of seasonal integration.
k_seasonal_ma : int
Highest seasonal moving average order in the model, zero-indexed.
k_seasonal_ma_params : int
Number of seasonal moving average parameters to be estimated.
k_trend : int
Order of the trend polynomial plus one (i.e. the constant polynomial
would have `k_trend=1`).
k_exog : int
Number of exogenous regressors.
Notes
-----
The SARIMA model is specified :math:`(p, d, q) \times (P, D, Q)_s`.
.. math::
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
In terms of a univariate structural model, this can be represented as
.. math::
y_t & = u_t + \eta_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
where :math:`\eta_t` is only applicable in the case of measurement error
(although it is also used in the case of a pure regression model, i.e. if
p=q=0).
In terms of this model, regression with SARIMA errors can be represented
easily as
.. math::
y_t & = \beta_t x_t + u_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
this model is the one used when exogenous regressors are provided.
Note that the reduced form lag polynomials will be written as:
.. math::
\Phi (L) \equiv \phi_p (L) \tilde \phi_P (L^s) \\
\Theta (L) \equiv \theta_q (L) \tilde \theta_Q (L^s)
If `mle_regression` is True, regression coefficients are treated as
additional parameters to be estimated via maximum likelihood. Otherwise
they are included as part of the state with a diffuse initialization.
In this case, however, with approximate diffuse initialization, results
can be sensitive to the initial variance.
This class allows two different underlying representations of ARMA models
as state space models: that of Hamilton and that of Harvey. Both are
equivalent in the sense that they are analytical representations of the
ARMA model, but the state vectors of each have different meanings. For
this reason, maximum likelihood does not result in identical parameter
estimates and even the same set of parameters will result in different
loglikelihoods.
The Harvey representation is convenient because it allows integrating
differencing into the state vector to allow using all observations for
estimation.
In this implementation of differenced models, the Hamilton representation
    is not able to accommodate differencing in the state vector, so
`simple_differencing` (which performs differencing prior to estimation so
that the first d + sD observations are lost) must be used.
Many other packages use the Hamilton representation, so that tests against
Stata and R require using it along with simple differencing (as Stata
does).
Detailed information about state space models can be found in [1]_. Some
specific references are:
- Chapter 3.4 describes ARMA and ARIMA models in state space form (using
the Harvey representation), and gives references for basic seasonal
models and models with a multiplicative form (for example the airline
model). It also shows a state space model for a full ARIMA process (this
is what is done here if `simple_differencing=False`).
- Chapter 3.6 describes estimating regression effects via the Kalman filter
(this is performed if `mle_regression` is False), regression with
time-varying coefficients, and regression with ARMA errors (recall from
above that if regression effects are present, the model estimated by this
class is regression with SARIMA errors).
- Chapter 8.4 describes the application of an ARMA model to an example
dataset. A replication of this section is available in an example
IPython notebook in the documentation.
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
def __init__(self, endog, exog=None, order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
measurement_error=False, time_varying_regression=False,
mle_regression=True, simple_differencing=False,
enforce_stationarity=True, enforce_invertibility=True,
hamilton_representation=False, **kwargs):
# Model parameters
self.k_seasons = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError('Models with time-varying regression coefficients'
' must integrate the coefficients as part of the'
' state vector, so that `mle_regression` must'
' be set to False.')
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], int):
self.polynomial_ar = np.r_[1., np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1., order[0]]
if isinstance(order[2], int):
self.polynomial_ma = np.r_[1., np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1., order[2]]
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], int):
self.polynomial_seasonal_ar = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[0]
]
else:
self.polynomial_seasonal_ar = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
self.polynomial_seasonal_ar[(i + 1) * self.k_seasons] = (
seasonal_order[0][i]
)
if isinstance(seasonal_order[2], int):
self.polynomial_seasonal_ma = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[2]
]
else:
self.polynomial_seasonal_ma = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
self.polynomial_seasonal_ma[(i + 1) * self.k_seasons] = (
seasonal_order[2][i]
)
# Deterministic trend polynomial
self.trend = trend
if trend is None or trend == 'n':
self.polynomial_trend = np.ones((0))
elif trend == 'c':
self.polynomial_trend = np.r_[1]
elif trend == 't':
self.polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
self.polynomial_trend = np.r_[1, 1]
else:
self.polynomial_trend = (np.array(trend) > 0).astype(int)
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
# Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar - 1 and
# q = k_ma_params = k_ma - 1, although this may not be true for models
        # with arbitrary lag polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = (
int(np.sum(self.polynomial_seasonal_ar) - 1)
)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = (
int(np.sum(self.polynomial_seasonal_ma) - 1)
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if (self.hamilton_representation and not (self.simple_differencing or
self._k_diff == self._k_seasonal_diff == 0)):
raise ValueError('The Hamilton representation is only available'
' for models in which there is no differencing'
' integrated into the state vector. Set'
' `simple_differencing` to True or set'
' `hamilton_representation` to False')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
self.k_trend = int(np.sum(self.polynomial_trend))
# Model order
# (this is used internally in a number of locations)
self._k_order = max(self.k_ar + self.k_seasonal_ar,
self.k_ma + self.k_seasonal_ma + 1)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
self._k_order = 0
# Exogenous data
self.k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = (
self.mle_regression and exog is not None and self.k_exog > 0
)
        # State regression is regression with coefficients estimated within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += self.k_seasons * self._k_seasonal_diff + self._k_diff
if self.state_regression:
k_states += self.k_exog
# Number of diffuse states
k_diffuse_states = k_states
if self.enforce_stationarity:
k_diffuse_states -= self._k_order
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
        # Diffuse initialization can be more sensitive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault('initial_variance', 1e10)
# Number of parameters
self.k_params = (
self.k_ar_params + self.k_ma_params +
            self.k_seasonal_ar_params + self.k_seasonal_ma_params +
self.k_trend +
self.measurement_error + 1
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if (self.simple_differencing and
(self._k_diff > 0 or self._k_seasonal_diff > 0)):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = (
self._k_diff + self.k_seasons * self._k_seasonal_diff
)
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', k_diffuse_states)
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Handle kwargs specified initialization
if self.ssm.initialization is not None:
self._manual_initialization = True
# Initialize the fixed components of the statespace model
self.ssm.design = self.initial_design
self.ssm.state_intercept = self.initial_state_intercept
self.ssm.transition = self.initial_transition
self.ssm.selection = self.initial_selection
# If we are estimating a simple ARMA model, then we can use a faster
# initialization method (unless initialization was already specified).
if k_diffuse_states == 0 and not self._manual_initialization:
self.initialize_stationary()
# update _init_keys attached by super
self._init_keys += ['order', 'seasonal_order', 'trend',
'measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'enforce_stationarity', 'enforce_invertibility',
'hamilton_representation'] + list(kwargs.keys())
        # TODO: I think the kwargs are not attached; need to recover them from ???
def _get_init_kwds(self):
kwds = super(SARIMAX, self)._get_init_kwds()
for key, value in kwds.items():
if value is None and hasattr(self.ssm, key):
kwds[key] = getattr(self.ssm, key)
return kwds
def prepare_data(self):
endog, exog = super(SARIMAX, self).prepare_data()
# Perform simple differencing if requested
if (self.simple_differencing and
(self.orig_k_diff > 0 or self.orig_k_seasonal_diff > 0)):
# Perform simple differencing
endog = diff(endog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
if exog is not None:
exog = diff(exog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
# Reset the ModelData datasets
self.data.endog, self.data.exog = (
self.data._convert_endog_exog(endog, exog))
# Reset the nobs
self.nobs = endog.shape[0]
# Cache the arrays for calculating the intercept from the trend
# components
time_trend = np.arange(1, self.nobs + 1)
self._trend_data = np.zeros((self.nobs, self.k_trend))
i = 0
for k in self.polynomial_trend.nonzero()[0]:
if k == 0:
self._trend_data[:, i] = np.ones(self.nobs,)
else:
self._trend_data[:, i] = time_trend**k
i += 1
return endog, exog
def initialize(self):
"""
Initialize the SARIMAX model.
Notes
-----
These initialization steps must occur following the parent class
__init__ function calls.
"""
super(SARIMAX, self).initialize()
# Internal flag for whether the default mixed approximate diffuse /
# stationary initialization has been overridden with a user-supplied
# initialization
self._manual_initialization = False
# Cache the indexes of included polynomial orders (for update below)
# (but we do not want the index of the constant term, so exclude the
# first index)
self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]
self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]
self._polynomial_seasonal_ar_idx = np.nonzero(
self.polynomial_seasonal_ar
)[0][1:]
self._polynomial_seasonal_ma_idx = np.nonzero(
self.polynomial_seasonal_ma
)[0][1:]
# Save the indices corresponding to the reduced form lag polynomial
# parameters in the transition and selection matrices so that they
# don't have to be recalculated for each update()
start_row = self._k_states_diff
end_row = start_row + self.k_ar + self.k_seasonal_ar
col = self._k_states_diff
if not self.hamilton_representation:
self.transition_ar_params_idx = (
np.s_['transition', start_row:end_row, col]
)
else:
self.transition_ar_params_idx = (
np.s_['transition', col, start_row:end_row]
)
start_row += 1
end_row = start_row + self.k_ma + self.k_seasonal_ma
col = 0
if not self.hamilton_representation:
self.selection_ma_params_idx = (
np.s_['selection', start_row:end_row, col]
)
else:
self.design_ma_params_idx = (
np.s_['design', col, start_row:end_row]
)
# Cache indices for exog variances in the state covariance matrix
if self.state_regression and self.time_varying_regression:
idx = np.diag_indices(self.k_posdef)
self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],
idx[1][-self.k_exog:])
def initialize_known(self, initial_state, initial_state_cov):
self._manual_initialization = True
self.ssm.initialize_known(initial_state, initial_state_cov)
initialize_known.__doc__ = KalmanFilter.initialize_known.__doc__
def initialize_approximate_diffuse(self, variance=None):
self._manual_initialization = True
self.ssm.initialize_approximate_diffuse(variance)
initialize_approximate_diffuse.__doc__ = (
KalmanFilter.initialize_approximate_diffuse.__doc__
)
def initialize_stationary(self):
self._manual_initialization = True
self.ssm.initialize_stationary()
initialize_stationary.__doc__ = (
KalmanFilter.initialize_stationary.__doc__
)
def initialize_state(self, variance=None):
"""
Initialize state and state covariance arrays in preparation for the
Kalman filter.
Parameters
----------
variance : float, optional
The variance for approximating diffuse initial conditions. Default
can be found in the Representation class documentation.
Notes
-----
Initializes the ARMA component of the state space to the typical
stationary values and the other components as approximate diffuse.
        Can be overridden by calling one of the other initialization methods
before fitting the model.
"""
# Check if a manual initialization has already been specified
if self._manual_initialization:
return
# If we're not enforcing stationarity, then we can't initialize a
# stationary component
if not self.enforce_stationarity:
self.initialize_approximate_diffuse(variance)
return
# Otherwise, create the initial state and state covariance matrix
# as from a combination of diffuse and stationary components
# Create initialized non-stationary components
if variance is None:
variance = self.ssm.initial_variance
dtype = self.ssm.transition.dtype
initial_state = np.zeros(self.k_states, dtype=dtype)
initial_state_cov = np.eye(self.k_states, dtype=dtype) * variance
# Get the offsets (from the bottom or bottom right of the vector /
# matrix) for the stationary component.
if self.state_regression:
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
start = -self._k_order
end = None
# Add in the initialized stationary components
if self._k_order > 0:
selection_stationary = self.ssm.selection[start:end, :, 0]
selected_state_cov_stationary = np.dot(
np.dot(selection_stationary, self.ssm.state_cov[:, :, 0]),
selection_stationary.T
)
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary
)
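            # (note added by the editor) solve_discrete_lyapunov returns the
            # stationary covariance P satisfying P = T P T' + R Q R' for the
            # ARMA block, i.e. the unconditional covariance of those states.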
initial_state_cov[start:end, start:end] = (
initial_state_cov_stationary
)
self.ssm.initialize_known(initial_state, initial_state_cov)
@property
def initial_design(self):
"""Initial design matrix"""
# Basic design matrix
design = np.r_[
[1] * self._k_diff,
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff,
[1] * self.state_error, [0] * (self._k_order - 1)
]
# If we have exogenous regressors included as part of the state vector
# then the exogenous data is incorporated as a time-varying component
# of the design matrix
if self.state_regression:
if self._k_order > 0:
design = np.c_[
np.reshape(
np.repeat(design, self.nobs),
(design.shape[0], self.nobs)
).T,
self.exog
].T[None, :, :]
else:
design = self.exog.T[None, :, :]
return design
@property
def initial_state_intercept(self):
"""Initial state intercept vector"""
# TODO make this self.k_trend > 1 and adjust the update to take
# into account that if the trend is a constant, it is not time-varying
if self.k_trend > 0:
state_intercept = np.zeros((self.k_states, self.nobs))
else:
state_intercept = np.zeros((self.k_states,))
return state_intercept
@property
def initial_transition(self):
"""Initial transition matrix"""
transition = np.zeros((self.k_states, self.k_states))
# Exogenous regressors component
if self.state_regression:
start = -self.k_exog
# T_\beta
transition[start:, start:] = np.eye(self.k_exog)
# Autoregressive component
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
# Autoregressive component
start = -self._k_order
end = None
# T_c
transition[start:end, start:end] = companion_matrix(self._k_order)
if self.hamilton_representation:
transition[start:end, start:end] = np.transpose(
companion_matrix(self._k_order)
)
# Seasonal differencing component
# T^*
if self._k_seasonal_diff > 0:
seasonal_companion = companion_matrix(self.k_seasons).T
seasonal_companion[0, -1] = 1
for d in range(self._k_seasonal_diff):
start = self._k_diff + d * self.k_seasons
end = self._k_diff + (d + 1) * self.k_seasons
# T_c^*
transition[start:end, start:end] = seasonal_companion
# i
for i in range(d + 1, self._k_seasonal_diff):
transition[start, end + self.k_seasons - 1] = 1
# \iota
transition[start, self._k_states_diff] = 1
# Differencing component
if self._k_diff > 0:
idx = np.triu_indices(self._k_diff)
# T^**
transition[idx] = 1
# [0 1]
if self.k_seasons > 0:
start = self._k_diff
end = self._k_states_diff
transition[:self._k_diff, start:end] = (
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff
)
# [1 0]
column = self._k_states_diff
transition[:self._k_diff, column] = 1
return transition
@property
def initial_selection(self):
"""Initial selection matrix"""
if not (self.state_regression and self.time_varying_regression):
if self.k_posdef > 0:
selection = np.r_[
[0] * (self._k_states_diff),
[1] * (self._k_order > 0), [0] * (self._k_order - 1),
[0] * ((1 - self.mle_regression) * self.k_exog)
][:, None]
else:
selection = np.zeros((self.k_states, 0))
else:
selection = np.zeros((self.k_states, self.k_posdef))
# Typical state variance
if self._k_order > 0:
selection[0, 0] = 1
# Time-varying regression coefficient variances
for i in range(self.k_exog, 0, -1):
selection[-i, -i] = 1
return selection
def filter(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
params = np.array(params, ndmin=1)
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(SARIMAX, self).filter(params, transformed, cov_type,
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = SARIMAXResultsWrapper(
SARIMAXResults(self, params, result, **result_kwargs)
)
return result
def smooth(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
params = np.array(params, ndmin=1)
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(SARIMAX, self).smooth(params, transformed, cov_type,
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = SARIMAXResultsWrapper(
SARIMAXResults(self, params, result, **result_kwargs)
)
return result
@staticmethod
def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,
polynomial_ma, k_trend=0, trend_data=None):
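        # (descriptive note added by the editor) conditional sum of squares
        # starting values: when MA terms are present, first fit a long AR
        # model to proxy the innovations, then regress endog on the trend
        # data, its own AR lags and the lagged residuals, returning
        # (trend, AR, MA, variance) starting parameters.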
k = 2 * k_ma
r = max(k + k_ma, k_ar)
k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1
k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1
residuals = None
if k_ar + k_ma + k_trend > 0:
# If we have MA terms, get residuals from an AR(k) model to use
# as data for conditional sum of squares estimates of the MA
# parameters
if k_ma > 0:
Y = endog[k:]
X = lagmat(endog, k, trim='both')
params_ar = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params_ar)
# Run an ARMA(p,q) model using the just computed residuals as data
Y = endog[r:]
X = np.empty((Y.shape[0], 0))
if k_trend > 0:
if trend_data is None:
raise ValueError('Trend data must be provided if'
' `k_trend` > 0.')
X = np.c_[X, trend_data[:(-r if r > 0 else None), :]]
if k_ar > 0:
cols = polynomial_ar.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(endog, k_ar)[r:, cols]]
if k_ma > 0:
cols = polynomial_ma.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(residuals, k_ma)[r-k:, cols]]
# Get the array of [ar_params, ma_params]
params = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params)
# Default output
params_trend = []
params_ar = []
params_ma = []
params_variance = []
# Get the params
offset = 0
if k_trend > 0:
params_trend = params[offset:k_trend + offset]
offset += k_trend
if k_ar > 0:
params_ar = params[offset:k_params_ar + offset]
offset += k_params_ar
if k_ma > 0:
params_ma = params[offset:k_params_ma + offset]
offset += k_params_ma
if residuals is not None:
params_variance = (residuals[k_params_ma:]**2).mean()
return (params_trend, params_ar, params_ma,
params_variance)
@property
def start_params(self):
"""
Starting parameters for maximum likelihood estimation
"""
# Perform differencing if necessary (i.e. if simple differencing is
# false so that the state-space model will use the entire dataset)
trend_data = self._trend_data
if not self.simple_differencing and (
self._k_diff > 0 or self._k_seasonal_diff > 0):
endog = diff(self.endog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
if self.exog is not None:
exog = diff(self.exog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
else:
exog = None
trend_data = trend_data[:endog.shape[0], :]
else:
endog = self.endog.copy()
exog = self.exog.copy() if self.exog is not None else None
endog = endog.squeeze()
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
        if np.any(np.isnan(endog)):
            nan_mask = ~np.isnan(endog)
            endog = endog[nan_mask]
            if exog is not None:
                exog = exog[nan_mask]
            if trend_data is not None:
                trend_data = trend_data[nan_mask]
# Regression effects via OLS
params_exog = []
if self.k_exog > 0:
params_exog = np.linalg.pinv(exog).dot(endog)
endog -= np.dot(exog, params_exog)
if self.state_regression:
params_exog = []
# Non-seasonal ARMA component and trend
(params_trend, params_ar, params_ma,
params_variance) = self._conditional_sum_squares(
endog, self.k_ar, self.polynomial_ar, self.k_ma,
self.polynomial_ma, self.k_trend, trend_data
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_ar = (
self.k_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_ar])
)
if invalid_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_ma = (
self.k_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_ma])
)
if invalid_ma:
raise ValueError('non-invertible starting MA parameters found'
' with `enforce_invertibility` set to True.')
# Seasonal Parameters
_, params_seasonal_ar, params_seasonal_ma, params_seasonal_variance = (
self._conditional_sum_squares(
endog, self.k_seasonal_ar, self.polynomial_seasonal_ar,
self.k_seasonal_ma, self.polynomial_seasonal_ma
)
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_seasonal_ar = (
self.k_seasonal_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_seasonal_ar])
)
if invalid_seasonal_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_seasonal_ma = (
self.k_seasonal_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_seasonal_ma])
)
if invalid_seasonal_ma:
raise ValueError('non-invertible starting seasonal moving average'
' parameters found with `enforce_invertibility`'
' set to True.')
# Variances
params_exog_variance = []
if self.state_regression and self.time_varying_regression:
# TODO how to set the initial variance parameters?
params_exog_variance = [1] * self.k_exog
if self.state_error and params_variance == []:
if not params_seasonal_variance == []:
params_variance = params_seasonal_variance
elif self.k_exog > 0:
params_variance = np.dot(endog, endog)
else:
params_variance = 1
params_measurement_variance = 1 if self.measurement_error else []
# Combine all parameters
return np.r_[
params_trend,
params_exog,
params_ar,
params_ma,
params_seasonal_ar,
params_seasonal_ma,
params_exog_variance,
params_measurement_variance,
params_variance
]
@property
def endog_names(self, latex=False):
"""Names of endogenous variables"""
diff = ''
if self.k_diff > 0:
if self.k_diff == 1:
diff = '\Delta' if latex else 'D'
else:
diff = ('\Delta^%d' if latex else 'D%d') % self.k_diff
seasonal_diff = ''
if self.k_seasonal_diff > 0:
if self.k_seasonal_diff == 1:
seasonal_diff = (('\Delta_%d' if latex else 'DS%d') %
(self.k_seasons))
else:
seasonal_diff = (('\Delta_%d^%d' if latex else 'D%dS%d') %
(self.k_seasonal_diff, self.k_seasons))
endog_diff = self.simple_differencing
if endog_diff and self.k_diff > 0 and self.k_seasonal_diff > 0:
return (('%s%s %s' if latex else '%s.%s.%s') %
(diff, seasonal_diff, self.data.ynames))
elif endog_diff and self.k_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(diff, self.data.ynames))
elif endog_diff and self.k_seasonal_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(seasonal_diff, self.data.ynames))
else:
return self.data.ynames
params_complete = [
'trend', 'exog', 'ar', 'ma', 'seasonal_ar', 'seasonal_ma',
'exog_variance', 'measurement_variance', 'variance'
]
@property
def param_terms(self):
"""
List of parameters actually included in the model, in sorted order.
TODO Make this an OrderedDict with slice or indices as the values.
"""
model_orders = self.model_orders
# Get basic list from model orders
params = [
order for order in self.params_complete
if model_orders[order] > 0
]
# k_exog may be positive without associated parameters if it is in the
# state vector
if 'exog' in params and not self.mle_regression:
params.remove('exog')
return params
@property
def param_names(self):
"""
List of human readable parameter names (for parameters actually
included in the model).
"""
params_sort_order = self.param_terms
model_names = self.model_names
return [
name for param in params_sort_order for name in model_names[param]
]
@property
def model_orders(self):
"""
The orders of each of the polynomials in the model.
"""
return {
'trend': self.k_trend,
'exog': self.k_exog,
'ar': self.k_ar,
'ma': self.k_ma,
'seasonal_ar': self.k_seasonal_ar,
'seasonal_ma': self.k_seasonal_ma,
'reduced_ar': self.k_ar + self.k_seasonal_ar,
'reduced_ma': self.k_ma + self.k_seasonal_ma,
'exog_variance': self.k_exog if (
self.state_regression and self.time_varying_regression) else 0,
'measurement_variance': int(self.measurement_error),
'variance': int(self.state_error),
}
@property
def model_names(self):
"""
The plain text names of all possible model parameters.
"""
return self._get_model_names(latex=False)
@property
def model_latex_names(self):
"""
The latex names of all possible model parameters.
"""
return self._get_model_names(latex=True)
def _get_model_names(self, latex=False):
names = {
'trend': None,
'exog': None,
'ar': None,
'ma': None,
'seasonal_ar': None,
'seasonal_ma': None,
'reduced_ar': None,
'reduced_ma': None,
'exog_variance': None,
'measurement_variance': None,
'variance': None,
}
# Trend
if self.k_trend > 0:
trend_template = 't_%d' if latex else 'trend.%d'
names['trend'] = []
for i in self.polynomial_trend.nonzero()[0]:
if i == 0:
names['trend'].append('intercept')
elif i == 1:
names['trend'].append('drift')
else:
names['trend'].append(trend_template % i)
# Exogenous coefficients
if self.k_exog > 0:
names['exog'] = self.exog_names
# Autoregressive
if self.k_ar > 0:
ar_template = '$\\phi_%d$' if latex else 'ar.L%d'
names['ar'] = []
for i in self.polynomial_ar.nonzero()[0][1:]:
names['ar'].append(ar_template % i)
# Moving Average
if self.k_ma > 0:
ma_template = '$\\theta_%d$' if latex else 'ma.L%d'
names['ma'] = []
for i in self.polynomial_ma.nonzero()[0][1:]:
names['ma'].append(ma_template % i)
# Seasonal Autoregressive
if self.k_seasonal_ar > 0:
seasonal_ar_template = (
'$\\tilde \\phi_%d$' if latex else 'ar.S.L%d'
)
names['seasonal_ar'] = []
for i in self.polynomial_seasonal_ar.nonzero()[0][1:]:
names['seasonal_ar'].append(seasonal_ar_template % i)
# Seasonal Moving Average
if self.k_seasonal_ma > 0:
seasonal_ma_template = (
'$\\tilde \\theta_%d$' if latex else 'ma.S.L%d'
)
names['seasonal_ma'] = []
for i in self.polynomial_seasonal_ma.nonzero()[0][1:]:
names['seasonal_ma'].append(seasonal_ma_template % i)
# Reduced Form Autoregressive
if self.k_ar > 0 or self.k_seasonal_ar > 0:
            reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
ar_template = '$\\Phi_%d$' if latex else 'ar.R.L%d'
names['reduced_ar'] = []
for i in reduced_polynomial_ar.nonzero()[0][1:]:
names['reduced_ar'].append(ar_template % i)
# Reduced Form Moving Average
if self.k_ma > 0 or self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
ma_template = '$\\Theta_%d$' if latex else 'ma.R.L%d'
names['reduced_ma'] = []
for i in reduced_polynomial_ma.nonzero()[0][1:]:
names['reduced_ma'].append(ma_template % i)
# Exogenous variances
if self.state_regression and self.time_varying_regression:
exog_var_template = '$\\sigma_\\text{%s}^2$' if latex else 'var.%s'
names['exog_variance'] = [
exog_var_template % exog_name for exog_name in self.exog_names
]
# Measurement error variance
if self.measurement_error:
meas_var_tpl = (
'$\\sigma_\\eta^2$' if latex else 'var.measurement_error'
)
names['measurement_variance'] = [meas_var_tpl]
# State variance
if self.state_error:
var_tpl = '$\\sigma_\\zeta^2$' if latex else 'sigma2'
names['variance'] = [var_tpl]
return names
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation.
Used primarily to enforce stationarity of the autoregressive lag
polynomial, invertibility of the moving average lag polynomial, and
positive variance parameters.
Parameters
----------
unconstrained : array_like
Unconstrained parameters used by the optimizer.
Returns
-------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
unconstrained = np.array(unconstrained, ndmin=1)
constrained = np.zeros(unconstrained.shape, unconstrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
constrained[start:end] = unconstrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ma_params
# Transform the standard deviation parameters to be positive
if self.state_regression and self.time_varying_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]**2
start += self.k_exog
if self.measurement_error:
constrained[start] = unconstrained[start]**2
start += 1
end += 1
if self.state_error:
constrained[start] = unconstrained[start]**2
# start += 1
# end += 1
return constrained
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Used primarily to reverse enforcement of stationarity of the
autoregressive lag polynomial and invertibility of the moving average
lag polynomial.
Parameters
----------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Returns
-------
        unconstrained : array_like
Unconstrained parameters used by the optimizer.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
constrained = np.array(constrained, ndmin=1)
unconstrained = np.zeros(constrained.shape, constrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
unconstrained[start:end] = constrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ma_params
# Untransform the standard deviation
if self.state_regression and self.time_varying_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]**0.5
start += self.k_exog
if self.measurement_error:
unconstrained[start] = constrained[start]**0.5
start += 1
end += 1
if self.state_error:
unconstrained[start] = constrained[start]**0.5
# start += 1
# end += 1
return unconstrained
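    # Rough usage sketch (added for illustration; ``mod`` is a hypothetical
    # SARIMAX instance, not defined in the original source). The two methods
    # above are intended to be inverses of each other on the interior of the
    # parameter space:
    # >>> params = mod.start_params                             # doctest: +SKIP
    # >>> np.allclose(mod.transform_params(mod.untransform_params(params)), params)
    # True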
def update(self, params, transformed=True):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : boolean, optional
Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
"""
params = super(SARIMAX, self).update(params, transformed)
params_trend = None
params_exog = None
params_ar = None
params_ma = None
params_seasonal_ar = None
params_seasonal_ma = None
params_exog_variance = None
params_measurement_variance = None
params_variance = None
# Extract the parameters
start = end = 0
end += self.k_trend
params_trend = params[start:end]
start += self.k_trend
if self.mle_regression:
end += self.k_exog
params_exog = params[start:end]
start += self.k_exog
end += self.k_ar_params
params_ar = params[start:end]
start += self.k_ar_params
end += self.k_ma_params
params_ma = params[start:end]
start += self.k_ma_params
end += self.k_seasonal_ar_params
params_seasonal_ar = params[start:end]
start += self.k_seasonal_ar_params
end += self.k_seasonal_ma_params
params_seasonal_ma = params[start:end]
start += self.k_seasonal_ma_params
if self.state_regression and self.time_varying_regression:
end += self.k_exog
params_exog_variance = params[start:end]
start += self.k_exog
if self.measurement_error:
params_measurement_variance = params[start]
start += 1
end += 1
if self.state_error:
params_variance = params[start]
# start += 1
# end += 1
# Update lag polynomials
if self.k_ar > 0:
if self.polynomial_ar.dtype == params.dtype:
self.polynomial_ar[self._polynomial_ar_idx] = -params_ar
else:
polynomial_ar = self.polynomial_ar.real.astype(params.dtype)
polynomial_ar[self._polynomial_ar_idx] = -params_ar
self.polynomial_ar = polynomial_ar
if self.k_ma > 0:
if self.polynomial_ma.dtype == params.dtype:
self.polynomial_ma[self._polynomial_ma_idx] = params_ma
else:
polynomial_ma = self.polynomial_ma.real.astype(params.dtype)
polynomial_ma[self._polynomial_ma_idx] = params_ma
self.polynomial_ma = polynomial_ma
if self.k_seasonal_ar > 0:
idx = self._polynomial_seasonal_ar_idx
if self.polynomial_seasonal_ar.dtype == params.dtype:
self.polynomial_seasonal_ar[idx] = -params_seasonal_ar
else:
polynomial_seasonal_ar = (
self.polynomial_seasonal_ar.real.astype(params.dtype)
)
polynomial_seasonal_ar[idx] = -params_seasonal_ar
self.polynomial_seasonal_ar = polynomial_seasonal_ar
if self.k_seasonal_ma > 0:
idx = self._polynomial_seasonal_ma_idx
if self.polynomial_seasonal_ma.dtype == params.dtype:
self.polynomial_seasonal_ma[idx] = params_seasonal_ma
else:
polynomial_seasonal_ma = (
self.polynomial_seasonal_ma.real.astype(params.dtype)
)
polynomial_seasonal_ma[idx] = params_seasonal_ma
self.polynomial_seasonal_ma = polynomial_seasonal_ma
# Get the reduced form lag polynomial terms by multiplying the regular
# and seasonal lag polynomials
        # Note: the numpy np.polymul examples assume coefficients are ordered
        # from highest degree to lowest, whereas ours run from lowest to
        # highest; this does not matter for the multiplication.
if self.k_seasonal_ar > 0:
reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
else:
reduced_polynomial_ar = -self.polynomial_ar
if self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
else:
reduced_polynomial_ma = self.polynomial_ma
# Observation intercept
# Exogenous data with MLE estimation of parameters enters through a
        # time-varying observation intercept (this is equivalent to simply
# subtracting it out of the endogenous variable first)
if self.mle_regression:
self.ssm['obs_intercept'] = np.dot(self.exog, params_exog)[None, :]
# State intercept (Harvey) or additional observation intercept
# (Hamilton)
        # SARIMA trend enters through a time-varying state intercept,
# associated with the first row of the stationary component of the
# state vector (i.e. the first element of the state vector following
# any differencing elements)
if self.k_trend > 0:
data = np.dot(self._trend_data, params_trend).astype(params.dtype)
if not self.hamilton_representation:
self.ssm['state_intercept', self._k_states_diff, :] = data
else:
# The way the trend enters in the Hamilton representation means
# that the parameter is not an ``intercept'' but instead the
# mean of the process. The trend values in `data` are meant for
# an intercept, and so must be transformed to represent the
# mean instead
if self.hamilton_representation:
data /= np.sum(-reduced_polynomial_ar)
# If we already set the observation intercept for MLE
# regression, just add to it
if self.mle_regression:
self.ssm.obs_intercept += data[None, :]
# Otherwise set it directly
else:
self.ssm.obs_intercept = data[None, :]
# Observation covariance matrix
if self.measurement_error:
self.ssm['obs_cov', 0, 0] = params_measurement_variance
# Transition matrix
if self.k_ar > 0 or self.k_seasonal_ar > 0:
self.ssm[self.transition_ar_params_idx] = reduced_polynomial_ar[1:]
elif not self.ssm.transition.dtype == params.dtype:
# This is required if the transition matrix is not really in use
            # (e.g. for an MA(q) process) so that its dtype never changes as
# the parameters' dtype changes. This changes the dtype manually.
self.ssm.transition = self.ssm.transition.real.astype(params.dtype)
# Selection matrix (Harvey) or Design matrix (Hamilton)
if self.k_ma > 0 or self.k_seasonal_ma > 0:
if not self.hamilton_representation:
self.ssm[self.selection_ma_params_idx] = (
reduced_polynomial_ma[1:]
)
else:
self.ssm[self.design_ma_params_idx] = reduced_polynomial_ma[1:]
# State covariance matrix
if self.k_posdef > 0:
self.ssm['state_cov', 0, 0] = params_variance
if self.state_regression and self.time_varying_regression:
self.ssm[self._exog_variance_idx] = params_exog_variance
# Initialize
if not self._manual_initialization:
self.initialize_state()
return params
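# Rough end-to-end sketch (illustrative only; ``endog`` is an assumed univariate
# series, not data from the original source). A typical seasonal specification
# might be fit as:
# >>> mod = SARIMAX(endog, order=(1, 1, 1), seasonal_order=(0, 1, 1, 12))  # doctest: +SKIP
# >>> res = mod.fit(disp=-1)
# >>> print(res.summary())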
class SARIMAXResults(MLEResults):
"""
Class to hold results from fitting an SARIMAX model.
Parameters
----------
model : SARIMAX instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the SARIMAX model instance.
polynomial_ar : array
Array containing autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients, ordered from lowest
degree to highest. Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
    model_orders : dict of int
The orders of each of the polynomials in the model.
param_terms : list of str
List of parameters actually included in the model, in sorted order.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type='opg', **kwargs):
super(SARIMAXResults, self).__init__(model, params, filter_results,
cov_type, **kwargs)
self.df_resid = np.inf # attribute required for wald tests
# Save _init_kwds
self._init_kwds = self.model._get_init_kwds()
# Save model specification
self.specification = Bunch(**{
# Set additional model parameters
'k_seasons': self.model.k_seasons,
'measurement_error': self.model.measurement_error,
'time_varying_regression': self.model.time_varying_regression,
'mle_regression': self.model.mle_regression,
'simple_differencing': self.model.simple_differencing,
'enforce_stationarity': self.model.enforce_stationarity,
'enforce_invertibility': self.model.enforce_invertibility,
'hamilton_representation': self.model.hamilton_representation,
'order': self.model.order,
'seasonal_order': self.model.seasonal_order,
# Model order
'k_diff': self.model.k_diff,
'k_seasonal_diff': self.model.k_seasonal_diff,
'k_ar': self.model.k_ar,
'k_ma': self.model.k_ma,
'k_seasonal_ar': self.model.k_seasonal_ar,
'k_seasonal_ma': self.model.k_seasonal_ma,
# Param Numbers
'k_ar_params': self.model.k_ar_params,
'k_ma_params': self.model.k_ma_params,
# Trend / Regression
'trend': self.model.trend,
'k_trend': self.model.k_trend,
'k_exog': self.model.k_exog,
'mle_regression': self.model.mle_regression,
'state_regression': self.model.state_regression,
})
# Polynomials
self.polynomial_trend = self.model.polynomial_trend
self.polynomial_ar = self.model.polynomial_ar
self.polynomial_ma = self.model.polynomial_ma
self.polynomial_seasonal_ar = self.model.polynomial_seasonal_ar
self.polynomial_seasonal_ma = self.model.polynomial_seasonal_ma
self.polynomial_reduced_ar = np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
self.polynomial_reduced_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
# Distinguish parameters
self.model_orders = self.model.model_orders
self.param_terms = self.model.param_terms
start = end = 0
for name in self.param_terms:
end += self.model_orders[name]
setattr(self, '_params_%s' % name, self.params[start:end])
start += self.model_orders[name]
@cache_readonly
def arroots(self):
"""
(array) Roots of the reduced form autoregressive lag polynomial
"""
return np.roots(self.polynomial_reduced_ar)**-1
@cache_readonly
def maroots(self):
"""
(array) Roots of the reduced form moving average lag polynomial
"""
return np.roots(self.polynomial_reduced_ma)**-1
@cache_readonly
def arfreq(self):
"""
(array) Frequency of the roots of the reduced form autoregressive
lag polynomial
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
@cache_readonly
def mafreq(self):
"""
(array) Frequency of the roots of the reduced form moving average
lag polynomial
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
@cache_readonly
def arparams(self):
"""
(array) Autoregressive parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ar
@cache_readonly
def maparams(self):
"""
(array) Moving average parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ma
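    # Illustrative note (added commentary; ``res`` is a hypothetical
    # SARIMAXResults instance). Because ``arroots`` / ``maroots`` above return
    # the roots of the reduced-form lag polynomials, stationarity and
    # invertibility of the fitted process can be checked roughly as:
    # >>> np.all(np.abs(res.arroots) > 1)                       # doctest: +SKIP
    # True
    # >>> np.all(np.abs(res.maroots) > 1)
    # True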
def predict(self, start=None, end=None, exog=None, dynamic=False,
**kwargs):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, i.e.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables if
end is beyond the last observation in the sample.
dynamic : boolean, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
full_results : boolean, optional
If True, returns a FilterResults instance; if False returns a
tuple with forecasts, the forecast errors, and the forecast error
covariance matrices. Default is False.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
if start is None:
start = 0
# Handle end (e.g. date)
_start = self.model._get_predict_start(start)
_end, _out_of_sample = self.model._get_predict_end(end)
# Handle exogenous parameters
if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
# Create a new faux SARIMAX model for the extended dataset
nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
endog = np.zeros((nobs, self.model.k_endog))
if self.model.k_exog > 0:
if exog is None:
raise ValueError('Out-of-sample forecasting in a model'
' with a regression component requires'
' additional exogenous values via the'
' `exog` argument.')
exog = np.array(exog)
required_exog_shape = (_out_of_sample, self.model.k_exog)
if not exog.shape == required_exog_shape:
raise ValueError('Provided exogenous values are not of the'
' appropriate shape. Required %s, got %s.'
% (str(required_exog_shape),
str(exog.shape)))
exog = np.c_[self.model.data.orig_exog.T, exog.T].T
model_kwargs = self._init_kwds.copy()
model_kwargs['exog'] = exog
model = SARIMAX(endog, **model_kwargs)
model.update(self.params)
# Set the kwargs with the update time-varying state space
# representation matrices
for name in self.filter_results.shapes.keys():
if name == 'obs':
continue
mat = getattr(model.ssm, name)
if mat.shape[-1] > 1:
if len(mat.shape) == 2:
kwargs[name] = mat[:, -_out_of_sample:]
else:
kwargs[name] = mat[:, :, -_out_of_sample:]
elif self.model.k_exog == 0 and exog is not None:
warn('Exogenous array provided to predict, but additional data not'
' required. `exog` argument ignored.')
return super(SARIMAXResults, self).predict(
start=start, end=end, exog=exog, dynamic=dynamic, **kwargs
)
def forecast(self, steps=1, exog=None, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, optional
The number of out of sample forecasts from the end of the
sample. Default is 1.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables for
each step forecasted.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
"""
return super(SARIMAXResults, self).forecast(steps, exog=exog, **kwargs)
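    # Rough usage sketch (illustrative; ``res`` and ``future_exog`` are
    # hypothetical, not defined in the original source). For a model with one
    # exogenous regressor, forecasting 8 steps ahead requires 8 out-of-sample
    # exog values:
    # >>> fcast = res.forecast(steps=8, exog=future_exog.reshape(8, 1))  # doctest: +SKIP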
def summary(self, alpha=.05, start=None):
# Create the model name
# See if we have an ARIMA component
order = ''
if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
if self.model.k_ar == self.model.k_ar_params:
order_ar = self.model.k_ar
else:
order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])
if self.model.k_ma == self.model.k_ma_params:
order_ma = self.model.k_ma
else:
order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_diff = 0 if self.model.simple_differencing else self.model.k_diff
order = '(%s, %d, %s)' % (order_ar, k_diff, order_ma)
# See if we have an SARIMA component
seasonal_order = ''
has_seasonal = (
self.model.k_seasonal_ar +
self.model.k_seasonal_diff +
self.model.k_seasonal_ma
) > 0
if has_seasonal:
            if (self.model.k_seasonal_ar ==
                    self.model.k_seasonal_ar_params * self.model.k_seasons):
order_seasonal_ar = (
int(self.model.k_seasonal_ar / self.model.k_seasons)
)
else:
order_seasonal_ar = (
tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])
)
            if (self.model.k_seasonal_ma ==
                    self.model.k_seasonal_ma_params * self.model.k_seasons):
order_seasonal_ma = (
int(self.model.k_seasonal_ma / self.model.k_seasons)
)
else:
order_seasonal_ma = (
tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])
)
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_seasonal_diff = self.model.k_seasonal_diff
if self.model.simple_differencing:
k_seasonal_diff = 0
seasonal_order = ('(%s, %d, %s, %d)' %
(str(order_seasonal_ar), k_seasonal_diff,
str(order_seasonal_ma), self.model.k_seasons))
if not order == '':
order += 'x'
model_name = (
'%s%s%s' % (self.model.__class__.__name__, order, seasonal_order)
)
return super(SARIMAXResults, self).summary(
alpha=alpha, start=start, model_name=model_name
)
summary.__doc__ = MLEResults.summary.__doc__
class SARIMAXResultsWrapper(MLEResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(SARIMAXResultsWrapper, SARIMAXResults)
| bsd-3-clause |
ishanic/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
dismalpy/dismalpy | dismalpy/ssm/tests/test_structural.py | 1 | 8654 | """
Tests for structural time series models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import warnings
from statsmodels.datasets import macrodata
from dismalpy.ssm import structural
from dismalpy.ssm.structural import UnobservedComponents
from .results import results_structural
from statsmodels.tools import add_constant
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
dta = macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS')
def run_ucm(name):
true = getattr(results_structural, name)
for model in true['models']:
kwargs = model.copy()
kwargs.update(true['kwargs'])
# Make a copy of the data
values = dta.copy()
freq = kwargs.pop('freq', None)
if freq is not None:
values.index = pd.date_range(start='1959-01-01', periods=len(dta),
freq=freq)
# Test pandas exog
if 'exog' in kwargs:
# Default value here is pd.Series object
exog = np.log(values['realgdp'])
# Also allow a check with a 1-dim numpy array
if kwargs['exog'] == 'numpy':
exog = exog.values.squeeze()
kwargs['exog'] = exog
# Create the model
mod = UnobservedComponents(values['unemp'], **kwargs)
# Smoke test for starting parameters, untransform, transform
# Also test that transform and untransform are inverses
mod.start_params
assert_allclose(mod.start_params, mod.transform_params(mod.untransform_params(mod.start_params)))
# Fit the model at the true parameters
res_true = mod.filter(true['params'])
# Check that the cycle bounds were computed correctly
freqstr = freq[0] if freq is not None else values.index.freqstr[0]
if freqstr == 'A':
cycle_period_bounds = (1.5, 12)
elif freqstr == 'Q':
cycle_period_bounds = (1.5*4, 12*4)
elif freqstr == 'M':
cycle_period_bounds = (1.5*12, 12*12)
else:
# If we have no information on data frequency, require the
# cycle frequency to be between 0 and pi
cycle_period_bounds = (2, np.inf)
# Test that the cycle frequency bound is correct
assert_equal(mod.cycle_frequency_bound,
(2*np.pi / cycle_period_bounds[1],
2*np.pi / cycle_period_bounds[0])
)
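        # Worked example of the arithmetic above (added for clarity): for
        # quarterly data the period bounds are (6, 48) quarters, so the
        # frequency bounds are (2*pi/48, 2*pi/6) ~= (0.131, 1.047) radians.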
# Test that the likelihood is correct
rtol = true.get('rtol', 1e-7)
atol = true.get('atol', 0)
assert_allclose(res_true.llf, true['llf'], rtol=rtol, atol=atol)
# Smoke test for plot_components
if have_matplotlib:
fig = res_true.plot_components()
plt.close(fig)
# Now fit the model via MLE
with warnings.catch_warnings(record=True) as w:
res = mod.fit(disp=-1)
# If we found a higher likelihood, no problem; otherwise check
# that we're very close to that found by R
if res.llf <= true['llf']:
assert_allclose(res.llf, true['llf'], rtol=1e-4)
# Smoke test for summary
res.summary()
def test_irregular():
run_ucm('irregular')
def test_fixed_intercept():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_intercept')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_constant():
run_ucm('deterministic_constant')
def test_random_walk():
run_ucm('random_walk')
def test_local_level():
run_ucm('local_level')
def test_fixed_slope():
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
run_ucm('fixed_slope')
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
def test_deterministic_trend():
run_ucm('deterministic_trend')
def test_random_walk_with_drift():
run_ucm('random_walk_with_drift')
def test_local_linear_deterministic_trend():
run_ucm('local_linear_deterministic_trend')
def test_local_linear_trend():
run_ucm('local_linear_trend')
def test_smooth_trend():
run_ucm('smooth_trend')
def test_random_trend():
run_ucm('random_trend')
def test_cycle():
run_ucm('cycle')
def test_seasonal():
run_ucm('seasonal')
def test_reg():
run_ucm('reg')
def test_rtrend_ar1():
run_ucm('rtrend_ar1')
def test_lltrend_cycle_seasonal_reg_ar1():
run_ucm('lltrend_cycle_seasonal_reg_ar1')
def test_mle_reg():
endog = np.arange(100)*1.0
exog = endog*2
# Make the fit not-quite-perfect
endog[::2] += 0.01
endog[1::2] -= 0.01
with warnings.catch_warnings(record=True) as w:
mod1 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=False)
res1 = mod1.fit(disp=-1)
mod2 = UnobservedComponents(endog, irregular=True, exog=exog, mle_regression=True)
res2 = mod2.fit(disp=-1)
assert_allclose(res1.regression_coefficients.filtered[0, -1], 0.5, atol=1e-5)
assert_allclose(res2.params[1], 0.5, atol=1e-5)
def test_specifications():
endog = [1, 2]
    # Test that when nothing is specified, a warning is issued and the model
    # that is fit is one with irregular=True and nothing else.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog)
message = ("Specified model does not contain a stochastic element;"
" irregular component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'irregular')
# Test an invalid string trend specification
assert_raises(ValueError, UnobservedComponents, endog, 'invalid spec')
# Test that if a trend component is specified without a level component,
# a warning is issued and a deterministic level component is added
with warnings.catch_warnings(record=True) as w:
mod = UnobservedComponents(endog, trend=True, irregular=True)
message = ("Trend component specified without level component;"
" deterministic level component added.")
assert_equal(str(w[0].message), message)
assert_equal(mod.trend_specification, 'deterministic trend')
# Test that if a string specification is provided, a warning is issued if
# the boolean attributes are also specified
trend_attributes = ['irregular', 'trend', 'stochastic_level',
'stochastic_trend']
for attribute in trend_attributes:
with warnings.catch_warnings(record=True) as w:
kwargs = {attribute: True}
mod = UnobservedComponents(endog, 'deterministic trend', **kwargs)
message = ("Value of `%s` may be overridden when the trend"
" component is specified using a model string."
% attribute)
assert_equal(str(w[0].message), message)
# Test that a seasonal with period less than two is invalid
assert_raises(ValueError, UnobservedComponents, endog, seasonal=1)
def test_start_params():
# Test that the behavior is correct for multiple exogenous and / or
# autoregressive components
# Parameters
nobs = int(1e4)
beta = np.r_[10, -2]
phi = np.r_[0.5, 0.1]
# Generate data
np.random.seed(1234)
exog = np.c_[np.ones(nobs), np.arange(nobs)*1.0]
eps = np.random.normal(size=nobs)
endog = np.zeros(nobs+2)
for t in range(1, nobs):
endog[t+1] = phi[0] * endog[t] + phi[1] * endog[t-1] + eps[t]
endog = endog[2:]
endog += np.dot(exog, beta)
# Now just test that the starting parameters are approximately what they
# ought to be (could make this arbitrarily precise by increasing nobs,
# but that would slow down the test for no real gain)
mod = UnobservedComponents(endog, exog=exog, autoregressive=2)
assert_allclose(mod.start_params, [1., 0.5, 0.1, 10, -2], atol=1e-1)
| bsd-2-clause |
hasadna/OpenTrain | webserver/opentrain/algorithm/utils.py | 1 | 2435 | import os
import sys
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.getcwd()))
os.environ['DJANGO_SETTINGS_MODULE']='opentrain.settings'
import analysis.models
import numpy as np
from math import cos, pi
from scipy import spatial
try:
import matplotlib.pyplot as plt
except ImportError:
pass
import simplekml
import config
import itertools
import datetime
def get_XY_pos(relativeNullPoint, p):
""" Calculates X and Y distances in meters.
"""
deltaLatitude = p.latitude - relativeNullPoint.latitude
deltaLongitude = p.longitude - relativeNullPoint.longitude
latitudeCircumference = 40075160 * cos(relativeNullPoint.latitude * pi / 180)
resultX = deltaLongitude * latitudeCircumference / 360
resultY = deltaLatitude * 40008000 / 360
return resultX, resultY
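# Rough sanity check for the conversion above (illustrative only; the point
# arguments are hypothetical): one degree of latitude is ~111.1 km everywhere
# (40008000 / 360), while one degree of longitude shrinks with cos(latitude),
# e.g. ~85.3 km at latitude 40 degrees (40075160 * cos(40 * pi / 180) / 360).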
def query_coords(point_tree, query_coords, query_accuracies):
    if isinstance(query_accuracies, (int, long, float)):
res = list(point_tree.query_ball_point(query_coords, query_accuracies))
else:
        res = [point_tree.query_ball_point(query_coords[i], query_accuracies[i])
               for i in xrange(len(query_accuracies))]
return res
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
def is_strictly_increasing(L):
return all(x<y for x, y in zip(L, L[1:]))
def is_increasing(L):
return all(x<=y for x, y in zip(L, L[1:]))
def find_index_of_first_consecutive_value(values, start_index):
res = None
for i in reversed(range(start_index)):
if values[i] != values[start_index]:
res = i+1
break
elif i == 0:
res = 0
break
return res
def get_report_counts_and_dates(do_print=False):
result = []
device_ids = analysis.models.Report.objects.values_list('device_id', flat=True).distinct()
for device_id in device_ids:
count = analysis.models.Report.objects.filter(device_id=device_id).count()
report = analysis.models.Report.objects.filter(device_id=device_id).order_by('timestamp')[:1].get()
result.append((report.timestamp.date(), count, device_id))
result = sorted(result)
if do_print:
for x in result:
print x
return result
if __name__ == '__main__':
pass
get_report_counts_and_dates(True)
| bsd-3-clause |
aleksandr-bakanov/astropy | astropy/table/table.py | 2 | 134662 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import TableIndices, TableLoc, TableILoc, TableLocIndices
import sys
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import types
import numpy as np
from numpy import ma
from astropy import log
from astropy.units import Quantity, QuantityInfo
from astropy.utils import isiterable, ShapedLikeNDArray
from astropy.utils.console import color_print
from astropy.utils.metadata import MetaData
from astropy.utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from astropy.utils.decorators import format_doc
from astropy.utils.exceptions import AstropyDeprecationWarning, NoValue
from astropy.io.registry import UnifiedReadWriteMethod
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from .connect import TableRead, TableWrite
from . import conf
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = ['Table.read', 'Table.write', 'Table._read',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
__doctest_requires__ = {'*pandas': ['pandas']}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or `None`
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
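# Illustrative example (added; not from the original source -- the exact integer
# dtype depends on the platform default):
# >>> descr(Column(name='a', data=[1, 2, 3]))                   # doctest: +SKIP
# ('a', dtype('int64'), ())
# >>> descr(Column(name='b', data=np.zeros((3, 2))))
# ('b', dtype('float64'), (2,))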
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, dict):
return None
names.update(row)
return list(names)
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError("Cannot replace column '{}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableReadWrite:
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
info = self
info._parent_cls = owner_cls
else:
info = instance.__dict__.get('info')
if info is None:
info = instance.__dict__['info'] = self.__class__(bound=True)
info._parent = instance
return info
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: http://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
            # For masked output, masked mixin columns need to set the output
            # mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, 'mask'):
data[col.info.name].mask = col.mask
return data
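    # Rough usage sketch (illustrative; ``t`` is a hypothetical Table):
    # >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})             # doctest: +SKIP
    # >>> arr = t.as_array()
    # >>> arr['a']                       # field access on the structured array
    # array([1, 2])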
def __init__(self, data=None, masked=False, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
units=None, descriptions=None,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if isinstance(rows, types.GeneratorType):
# Without this then the all(..) test below uses up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (names_from_list_of_dict or
_get_names_from_list_of_dict(data))
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
return
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support bytes column names on Python 3, so fix them
# up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute('unit', units)
self._set_column_attribute('description', descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, dict):
# Not a dict: assume an iterable of values and map it to a dict keyed by column name (must have the right length).
if len(values) != len(self.columns):
raise ValueError(f'sequence of {attr} values must match number of columns')
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(f'invalid column name {name} for setting {attr} attribute')
# Special case: ignore unit if it is an empty or blank string
if attr == 'unit' and isinstance(value, str):
if value.strip() == '':
value = None
if value not in (np.ma.masked, None):
setattr(self[name].info, attr, value)
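# Illustrative usage sketch (added for exposition; not part of the original
# source). Shows the ``units`` and ``descriptions`` init kwargs that are routed
# through _set_column_attribute above; assumes the documented public Table API.
from astropy.table import Table

t = Table({'a': [1, 2], 'b': [3.0, 4.0]},
          units={'b': 'm'},
          descriptions={'a': 'an integer column'})
print(t['b'].unit)         # m
print(t['a'].description)  # an integer column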
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table([getattr(col, 'mask', FalseArray(col.shape))
for col in self.itercols()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
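# Illustrative usage sketch (added for exposition; not part of the original
# source). The dynamic ``mask`` view defined above supports in-place
# assignment, so the underlying column masks are updated directly.
from astropy.table import Table

t = Table({'a': [1, 2, 3]}, masked=True)
t.mask['a'] = [True, False, True]  # writes through to t['a'].mask
print(t['a'])                      # first and last entries show as masked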
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If an input ``fill_value`` is supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [col.filled(fill_value) if hasattr(col, 'filled') else col
for col in self.itercols()]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
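# Illustrative usage sketch (added for exposition; not part of the original
# source). Filling masked entries with an explicit fill value, per the
# filled() docstring above.
from astropy.table import Table

t = Table({'a': [1, 2, 3]}, masked=True)
t['a'].mask = [False, True, False]
filled = t.filled(-99)
print(filled['a'])  # [1, -99, 3]; the original table is left unchanged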
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if not any(index is x for x in lst):  # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, FastRBT, and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{}", of '
'type "{}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
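# Illustrative usage sketch (added for exposition; not part of the original
# source). Creating a single-column index and retrieving rows through the
# ``loc`` accessor; the first index added becomes the primary key, as in
# add_index() above.
from astropy.table import Table

t = Table({'name': ['a', 'b', 'c'], 'val': [10, 20, 30]})
t.add_index('name')          # becomes the primary table index
print(t.loc['b']['val'])     # 20 -- row looked up by index value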
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
return _IndexModeContext(self, mode)
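# Illustrative usage sketch (added for exposition; not part of the original
# source). Using the 'freeze' mode described above to defer index updates
# while making many modifications to an indexed column.
from astropy.table import Table

t = Table({'a': [3, 1, 2]})
t.add_index('a')
with t.index_mode('freeze'):
    t['a'][0] = 0            # index is not refreshed inside the block
# on exit the index is rebuilt from the current column values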
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
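# Illustrative usage sketch (added for exposition; not part of the original
# source). Converting a table to a structured ndarray; passing an explicit
# dtype raises ValueError, as enforced in __array__ above.
import numpy as np
from astropy.table import Table

t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
arr = np.array(t)            # structured array with fields 'a' and 'b'
print(arr.dtype.names)       # ('a', 'b')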
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError(f'{inp_str} must be a list or None')
if len(names) != n_cols or len(dtype) != n_cols:
    raise ValueError('Arguments "names" and "dtype" must match number of columns')
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define a placeholder for missing values as a unique object that cannot
# ever occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
# Put names into a preferred order, either using the first row of data
# if it is ordered, or alphabetically. Starting with Python 3.7, dict
# is ordered so this test can be relaxed. (In practice CPython 3.6 is
# this way, but not according to the formal spec).
if (isinstance(data[0], OrderedDict) and
set(data[0].keys()) == names_from_data):
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
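# Illustrative usage sketch (added for exposition; not part of the original
# source). Initializing from a list of dicts; a key missing from some rows
# produces masked entries, as implemented in _init_from_list_of_dicts above.
from astropy.table import Table

rows = [{'a': 1, 'b': 2.0}, {'a': 3}]   # second row has no 'b'
t = Table(rows=rows)
print(t['b'])                           # second entry is masked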
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(self, data, copy=True, default_name=None, dtype=None, name=None):
"""
Convert any allowed sequence data ``col`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
    name or data.info.name or default_name
If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (not isinstance(data, Column) and not data_is_mixin and
isinstance(data, np.ndarray) and len(data.dtype) > 1):
data = data.view(NdarrayMixin)
data_is_mixin = True
# Get the final column name using precedence. Some objects may not
# have an info attribute.
if not name:
if hasattr(data, 'info'):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
col = col_copy(data, copy_indices=self._init_indices) if copy else data
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
col.info.indices = []
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, np.ma.MaskedArray):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif not hasattr(data, 'dtype'):
# If value doesn't have a dtype then convert to a masked numpy array.
# Then check if there were any masked elements. This logic is handling
# normal lists like [1, 2] but also odd-ball cases like a list of masked
# arrays (see #8977). Use np.ma.array() to do the heavy lifting.
try:
np_data = np.ma.array(data, dtype=dtype)
except Exception:
# Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity
np_data = np.ma.array(data, dtype=object)
if np_data.ndim > 0 and len(np_data) == 0:
# Implies input was an empty list (e.g. initializing an empty table
# with pre-declared names and dtypes but no data). Here we need to
# fall through to initializing with the original data=[].
col_cls = self.ColumnClass
else:
if np_data.mask is np.ma.nomask:
data = np_data.data
col_cls = self.ColumnClass
else:
data = np_data
col_cls = masked_col_cls
copy = False
else:
# `data` is none of the above, so just go for it and try init'ing Column
col_cls = self.ColumnClass
try:
col = col_cls(name=name, data=data, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError('unable to convert data to Column for Table')
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) > 1:
raise ValueError('Inconsistent data column lengths: {}'
.format(lengths))
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys())
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
# case the function is being called by from "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError('Cannot have None for column name')
if len(set(names)) != len(names):
raise ValueError('Duplicate column names')
table.columns = table.TableColumns((name, col) for name, col in zip(names, cols))
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={}'.format(len(self)))
descr = ' '.join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = '<i>{}</i>\n'.format(xml_escape(descr))
else:
descr = f'<{descr}>\n'
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values, use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
for col in self.itercols():
if hasattr(col, 'mask') and np.any(col.mask):
return True
else:
return False
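# Illustrative usage sketch (added for exposition; not part of the original
# source). Distinguishing the mask-related properties defined above.
from astropy.table import Table, MaskedColumn

t = Table({'a': MaskedColumn([1, 2], mask=[False, True])})
print(t.has_masked_columns)  # True: a MaskedColumn is present
print(t.has_masked_values)   # True: at least one value is actually masked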
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not a Quantity (which gets converted to a Column with
# unit set)?
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(max_lines, max_width, show_name,
show_unit, show_dtype, align)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + self.columns.values(),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{}-{}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.info.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin('file:', pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {} rows'.format(len(self)))
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(max_lines, max_width, show_name,
show_unit, show_dtype, html, tableid,
align, tableclass)
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {} for table item access'
.format(type(item)))
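# Illustrative usage sketch (added for exposition; not part of the original
# source). The item-access forms dispatched by __getitem__ above.
import numpy as np
from astropy.table import Table

t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
col = t['a']                      # column by name
row = t[0]                        # Row object for one row
sub = t[['a']]                    # new table with a subset of columns
sel = t[np.where(t['a'] > 1)]     # new table from the np.where tuple form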
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {} for table item access'
.format(type(item)))
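# Illustrative usage sketch (added for exposition; not part of the original
# source). The assignment forms dispatched by __setitem__ above.
from astropy.table import Table

t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
t['c'] = ['x', 'y', 'z']          # unknown name: a new column is added
t['a'] = [10, 20, 30]             # existing name: column replaced (or updated in place)
t[0] = (1, 1.0, 'q')              # integer index: one row is set transactionally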
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray)) and
all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray)) and
np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
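# Illustrative usage sketch (added for exposition; not part of the original
# source). The deletion forms dispatched by __delitem__ above.
from astropy.table import Table

t = Table({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
del t['c']          # string: remove a column
del t[['b']]        # list of strings: remove several columns
del t[0]            # integer: remove a row
del t[0:2]          # slice: remove a range of rows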
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names and
all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
# and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True,
default_name=None):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
in the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify the column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or `None`
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = 'col{}'.format(len(self.columns))
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(col, name=name, copy=copy,
default_name=default_name)
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
if (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, 'shape', ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape,
subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape,
subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError('Inconsistent data column lengths')
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + '_' + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new columns the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of objects
List of data objects for the new columns
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
default_names = ['col{}'.format(ii + len(self.columns))
for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes)):
self.add_column(cols[ii], index=indexes[ii], name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate, copy=copy)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn(f"replaced column '{name}'",
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
copy : bool
Make copy of the input ``col``, default=True
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f'column name {name} is not in the table')
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError('length of new column must match table length')
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
``~astropy.table.Row`` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (default to all columns if no names provided)
Returns
-------
rows : iterator returning tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f'{name} is not a valid column name')
cols = (self[name] for name in names)
out = zip(*cols)
return out
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same result as using remove_column.
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError(f"Column {name} does not exist")
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, 'utf-8'))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in col.info.attr_names - col.info._attrs_no_copy - set(['dtype']):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
"""
self._convert_string_dtype('S', 'U', np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype('U', 'S', np.char.encode)
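# Illustrative usage sketch (added for exposition; not part of the original
# source). Round-tripping a string column between bytestring and unicode
# using the two public helpers above.
from astropy.table import Table

t = Table({'s': [b'abc', b'def']})   # column dtype.kind is 'S'
t.convert_bytestring_to_unicode()    # dtype.kind becomes 'U'
t.convert_unicode_to_bytestring()    # back to dtype.kind 'S'
print(t['s'].dtype.kind)             # S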
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Specifying a list of column names to keep is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError(f"Column {name} does not exist")
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
'''
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
'''
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError("input 'new_names' must be a tuple or a list of column names")
if len(names) != len(new_names):
raise ValueError("input 'names' and 'new_names' list arguments must be the same length")
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {} is out of bounds for table with length {}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn):
col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {} after inserting {}'
' (expected {}, got {})'
.format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, 'mask'):
newcol[index] = np.ma.masked
else:
raise TypeError("mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name))
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{}':\n{}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
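# Illustrative usage sketch (not part of the original source; the table and
# the column names 'a'/'b' are hypothetical): inserting a row by position
# using the mapping form of ``vals`` described in the docstring above.
# >>> t = Table([[1, 3], [2.0, 4.0]], names=('a', 'b'))
# >>> t.insert_row(1, {'a': 2, 'b': 3.0})
# >>> print(t)
#  a   b
# --- ---
#   1 2.0
#   2 3.0
#   3 4.0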
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self.as_array(names=keys)
else:
data = self.as_array()
idx = data.argsort(**kwargs)
if reverse:
return idx[::-1]
return idx
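# Illustrative usage sketch (not part of the original source; column name 'a'
# is hypothetical): argsort returns a row ordering without modifying the table.
# >>> t = Table([[3, 1, 2]], names=('a',))
# >>> t.argsort('a')
# array([1, 2, 0])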
def sort(self, keys=None, reverse=False):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys)
if reverse:
indexes = indexes[::-1]
sort_index = get_index(self, names=keys)
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
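# Illustrative usage sketch (not part of the original source): with
# copy_data=False the new table shares the underlying column data, so an
# in-place edit is visible through both tables.
# >>> t = Table([[1, 2]], names=('a',))
# >>> t2 = t.copy(copy_data=False)
# >>> t2['a'][0] = 99
# >>> t['a'][0]
# 99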
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
This is actual implementation for __eq__.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with other::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : Table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with other::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError('cannot compare tables with different column names')
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
eq = self[name] == other[name]
if (warns and issubclass(warns[-1].category, FutureWarning) and
'elementwise comparison failed' in str(warns[-1].message)):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f'unable to compare column {name}') from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (isinstance(eq, np.ndarray) and
eq.dtype is np.dtype('bool') and
len(eq) == len(self)):
raise TypeError(f'comparison for column {name} returned {eq} '
f'instead of the expected boolean ndarray')
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
"""
return groups.table_group_by(self, keys)
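# Illustrative usage sketch (not part of the original source; column names
# 'key'/'val' are hypothetical): grouping by a key column and iterating over
# the groups of the returned table.
# >>> t = Table([['a', 'b', 'a'], [1, 2, 3]], names=('key', 'val'))
# >>> tg = t.group_by('key')
# >>> for group in tg.groups:
# ...     print(group['val'].sum())
# 4
# 2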
def to_pandas(self, index=None):
"""
Return a :class:`pandas.DataFrame` instance
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 00:00:03
2002-01-01 2.0 6.0 8.0 00:03:20
"""
from pandas import DataFrame
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError('index must be None, False, True or a table '
'column name')
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from . import serialize
from astropy.utils.data_info import MixinInfo, serialize_context_as
from astropy.time import Time, TimeDelta
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, Time)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype('timedelta64[ns]')
nat = np.timedelta64('NaT')
else:
new_col = col.datetime64.copy()
nat = np.datetime64('NaT')
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)')
out = OrderedDict()
for name, column in tbl.columns.items():
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
warnings.warn(
"converted column '{}' from integer to float".format(
name), TableReplaceWarning, stacklevel=3)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
kwargs = {'index': out.pop(index)} if index else {}
return DataFrame(out, **kwargs)
@classmethod
def from_pandas(cls, dataframe, index=False):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 00:00:01 3.0
1 2002-01-01 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
object object float64
----------------------- ------ -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or 'index'
while index_name in names:
index_name = '_' + index_name + '_'
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
for name, column, data, mask in zip(names, columns, datas, masks):
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == 'M':
from astropy.time import Time
out[name] = Time(data, format='datetime64')
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = 'isot'
# Numpy timedelta64
elif data.dtype.kind == 'm':
from astropy.time import TimeDelta
data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format='sec')
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- http://docs.astropy.org/en/stable/table/
- http://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, 'unit', None) is not None:
# What to do with MaskedColumn with units: leave as MaskedColumn or
# turn into Quantity and drop mask? Assuming we have masking support
# in Quantity someday, let's drop the mask (consistent with legacy
# behavior) but issue a warning.
if isinstance(col, MaskedColumn) and np.any(col.mask):
warnings.warn("dropping mask in Quantity column '{}': "
"masked Quantity not supported".format(col.info.name))
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
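# Illustrative usage sketch (not part of the original source): per the class
# docstring above, a Column that carries a unit is converted to a Quantity
# when placed in a QTable.
# >>> import astropy.units as u
# >>> qt = QTable([Column([1.0, 2.0], unit=u.m, name='d')])
# >>> type(qt['d']).__name__
# 'Quantity'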
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
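# Illustrative usage sketch (not part of the original source): wrapping a 2-D
# numpy array as an NdarrayMixin lets it be stored as a single mixin column.
# >>> arr = NdarrayMixin(np.arange(6).reshape(3, 2))
# >>> t = Table()
# >>> t['arr'] = arr
# >>> len(t)
# 3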
| bsd-3-clause |
setten/pymatgen | pymatgen/analysis/phase_diagram.py | 3 | 76238 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import csv
import collections
import itertools
from io import open
import math
from six.moves import zip
import warnings
from monty.json import MSONable, MontyDecoder
from monty.string import unicode2str
from monty.functools import lru_cache
from monty.dev import deprecated
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element, DummySpecie, get_el_sp
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.analysis.reaction_calculator import Reaction, \
ReactionError
"""
This module defines tools to generate and analyze phase diagrams.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "May 16, 2011"
class PDEntry(MSONable):
"""
An object encompassing all relevant data for phase diagrams.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
Args:
comp: Composition as a pymatgen.core.structure.Composition
energy: Energy for composition.
name: Optional parameter to name the entry. Defaults to the reduced
chemical formula.
attribute: Optional attribute of the entry. This can be used to
specify that the entry is a newly found compound, or to specify a
particular label for the entry, or else ... Used for further
analysis and plotting purposes. An attribute can be anything
but must be MSONable.
"""
def __init__(self, composition, energy, name=None, attribute=None):
self.energy = energy
self.composition = Composition(composition)
self.name = name if name else self.composition.reduced_formula
self.attribute = attribute
@property
def energy_per_atom(self):
"""
Returns the final energy per atom.
"""
return self.energy / self.composition.num_atoms
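# Illustrative sketch (not part of the original source; the energy value is
# made up): energy_per_atom is just the total energy divided by the number of
# atoms in the composition, here -25.0 eV / 5 atoms.
# >>> PDEntry(Composition("Fe2O3"), -25.0).energy_per_atom
# -5.0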
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.composition.is_element
def __repr__(self):
return "PDEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"composition": self.composition.as_dict(),
"energy": self.energy,
"name": self.name,
"attribute": self.attribute}
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.as_dict() == other.as_dict()
else:
return False
def __hash__(self):
return id(self)
@classmethod
def from_dict(cls, d):
return cls(Composition(d["composition"]), d["energy"],
d["name"] if "name" in d else None,
d["attribute"] if "attribute" in d else None)
@staticmethod
def to_csv(filename, entries, latexify_names=False):
"""
Exports PDEntries to a csv
Args:
filename: Filename to write to.
entries: PDEntries to export.
latexify_names: Format entry names to be LaTex compatible,
e.g., Li_{2}O
"""
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = sorted(list(elements), key=lambda a: a.X)
writer = csv.writer(open(filename, "wb"), delimiter=unicode2str(","),
quotechar=unicode2str("\""),
quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Name"] + elements + ["Energy"])
for entry in entries:
row = [entry.name if not latexify_names
else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
row.extend([entry.composition[el] for el in elements])
row.append(entry.energy)
writer.writerow(row)
@staticmethod
def from_csv(filename):
"""
Imports PDEntries from a csv.
Args:
filename: Filename to import from.
Returns:
List of Elements, List of PDEntries
"""
with open(filename, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter=unicode2str(","),
quotechar=unicode2str("\""),
quoting=csv.QUOTE_MINIMAL)
entries = list()
header_read = False
for row in reader:
if not header_read:
elements = row[1:(len(row) - 1)]
header_read = True
else:
name = row[0]
energy = float(row[-1])
comp = dict()
for ind in range(1, len(row) - 1):
if float(row[ind]) > 0:
comp[Element(elements[ind - 1])] = float(row[ind])
entries.append(PDEntry(Composition(comp), energy, name))
elements = [Element(el) for el in elements]
return elements, entries
class GrandPotPDEntry(PDEntry):
"""
A grand potential pd entry object encompassing all relevant data for phase
diagrams. Chemical potentials are given as a element-chemical potential
dict.
Args:
entry: A PDEntry-like object.
chempots: Chemical potential specification as {Element: float}.
name: Optional parameter to name the entry. Defaults to the reduced
chemical formula of the original entry.
"""
def __init__(self, entry, chempots, name=None):
comp = entry.composition
self.original_entry = entry
self.original_comp = comp
grandpot = entry.energy - sum([comp[el] * pot
for el, pot in chempots.items()])
self.chempots = chempots
new_comp_map = {el: comp[el] for el in comp.elements
if el not in chempots}
super(GrandPotPDEntry, self).__init__(new_comp_map, grandpot,
entry.name)
self.name = name if name else entry.name
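# Illustrative sketch (not part of the original source; energies and chemical
# potentials are made up): the grand potential subtracts N_X * mu_X for each
# open element, and the open element is dropped from the effective composition.
# >>> li2o = PDEntry(Composition("Li2O"), -14.0)
# >>> gp = GrandPotPDEntry(li2o, {Element("O"): -5.0})
# >>> gp.energy  # -14.0 - 1 * (-5.0)
# -9.0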
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.original_comp.is_element
def __repr__(self):
chempot_str = " ".join(["mu_%s = %.4f" % (el, mu)
for el, mu in self.chempots.items()])
return "GrandPotPDEntry with original composition " + \
"{}, energy = {:.4f}, {}".format(self.original_entry.composition,
self.original_entry.energy,
chempot_str)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"chempots": {el.symbol: u for el, u in self.chempots.items()},
"name": self.name}
@classmethod
def from_dict(cls, d):
chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
entry = MontyDecoder().process_decoded(d["entry"])
return cls(entry, chempots, d["name"])
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
class PDEntryIO(object):
@staticmethod
def to_csv(*args, **kwargs):
warnings.warn(
"PDEntryIO.from_csv and PDEntryIO.to_csv has been moved to "
"PDEntry.from_csv and PDEntryIO.to_csv. This stub will be "
"removed in pmg 2018.01.01.")
PDEntry.to_csv(*args, **kwargs)
@staticmethod
def from_csv(*args, **kwargs):
warnings.warn(
"PDEntryIO.from_csv and PDEntryIO.to_csv has been moved to "
"PDEntry.from_csv and PDEntryIO.to_csv. This stub will be "
"removed in pmg 2018.01.01.")
return PDEntry.from_csv(*args, **kwargs)
class TransformedPDEntry(PDEntry):
"""
This class represents a TransformedPDEntry, which allows for a PDEntry to be
transformed to a different composition coordinate space. It is used in the
construction of phase diagrams that do not have elements as the terminal
compositions.
Args:
comp: Transformed composition as a Composition.
original_entry: Original entry that this entry arose from.
"""
def __init__(self, comp, original_entry):
super(TransformedPDEntry, self).__init__(comp, original_entry.energy)
self.original_entry = original_entry
self.name = original_entry.name
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
def __repr__(self):
output = ["TransformedPDEntry {}".format(self.composition),
" with original composition {}"
.format(self.original_entry.composition),
", E = {:.4f}".format(self.original_entry.energy)]
return "".join(output)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"composition": self.composition}
@classmethod
def from_dict(cls, d):
entry = MontyDecoder().process_decoded(d["entry"])
return cls(d["composition"], entry)
class PhaseDiagram(MSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
.. attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
For a ternary, it is the indices (references to qhull_entries and
qhull_data) for the vertices of the phase triangles. Similarly
extended to higher D simplices for higher dimensions.
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
.. attribute: simplices:
The simplices of the phase diagram as a list of np.ndarray, i.e.,
the list of stable compositional coordinates in the phase diagram.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
def __init__(self, entries, elements=None):
"""
Standard constructor for phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = list(elements)
dim = len(elements)
get_reduced_comp = lambda e: e.composition.reduced_composition
entries = sorted(entries, key=get_reduced_comp)
el_refs = {}
min_entries = []
all_entries = []
for c, g in itertools.groupby(entries, key=get_reduced_comp):
g = list(g)
min_entry = min(g, key=lambda e: e.energy_per_atom)
if c.is_element:
el_refs[c.elements[0]] = min_entry
min_entries.append(min_entry)
all_entries.extend(g)
if len(el_refs) != dim:
raise PhaseDiagramError(
"There are no entries associated with a terminal element!.")
data = np.array([
[e.composition.get_atomic_fraction(el) for el in elements] + [
e.energy_per_atom]
for e in min_entries
])
# Use only entries with negative formation energy
vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
form_e = -np.dot(data, vec)
inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()
# Add the elemental references
inds.extend([min_entries.index(el) for el in el_refs.values()])
qhull_entries = [min_entries[i] for i in inds]
qhull_data = data[inds][:, 1:]
# Add an extra point to enforce full dimensionality.
# This point will be present in all upper hull facets.
extra_point = np.zeros(dim) + 1 / dim
extra_point[-1] = np.max(qhull_data) + 1
qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
if dim == 1:
self.facets = [qhull_data.argmin(axis=0)]
else:
facets = get_facets(qhull_data)
finalfacets = []
for facet in facets:
# Skip facets that include the extra point
if max(facet) == len(qhull_data) - 1:
continue
m = qhull_data[facet]
m[:, -1] = 1
if abs(np.linalg.det(m)) > 1e-14:
finalfacets.append(facet)
self.facets = finalfacets
self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]
self.all_entries = all_entries
self.qhull_data = qhull_data
self.dim = dim
self.el_refs = el_refs
self.elements = elements
self.qhull_entries = qhull_entries
self._stable_entries = set(self.qhull_entries[i] for i in
set(itertools.chain(*self.facets)))
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
"""
if set(comp.elements).difference(self.elements):
raise ValueError('{} has elements not in the phase diagram {}'
''.format(comp, self.elements))
return np.array(
[comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
data = []
for entry in self.all_entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in self.elements]
row.append(entry.energy_per_atom)
data.append(row)
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def stable_entries(self):
"""
Returns the stable entries in the phase diagram.
"""
return self._stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry: An PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
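# Worked relation for the two methods above (not part of the original source):
# for an entry with composition AxBy,
#     E_form = E(AxBy) - x * E_ref(A) - y * E_ref(B)
# where E_ref are the energies per atom of the elemental reference entries in
# self.el_refs, and E_form / (x + y) is the per-atom value returned here.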
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = [PDEntry.from_dict(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
@lru_cache(1)
def _get_facet_and_simplex(self, comp):
"""
Get any facet that a composition falls into. Cached so successive
calls at same composition are fast.
"""
c = self.pd_coords(comp)
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
return f, s
raise RuntimeError("No facet found for comp = {}".format(comp))
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in
complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp: A composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
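# Illustrative sketch (not part of the original source; all energies are made
# up): building a tiny Li-O phase diagram and asking for the decomposition of
# an off-hull composition. A 50/50 Li-O mix falls between Li2O (x_O = 1/3)
# and O (x_O = 1) on the hull.
# >>> entries = [PDEntry("Li", 0.0), PDEntry("O", 0.0),
# ...            PDEntry("Li2O", -14.0), PDEntry("LiO", -2.0)]
# >>> pd = PhaseDiagram(entries)
# >>> pd.get_decomposition(Composition("LiO"))
# # -> roughly {<Li2O entry>: 0.75, <O entry>: 0.25}, i.e. a 3:1 atomic mix
# # of the two stable phases bounding x_O = 0.5.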
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
"""
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
"""
if entry in self.stable_entries:
return {entry: 1}, 0
comp = entry.composition
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
return decomp, ehull
raise ValueError("No valid decomp found!")
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
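# Continuing the hypothetical Li-O sketch above (not part of the original
# source): the unstable LiO entry lies above the Li2O-O tieline,
#     E_hull(x_O = 0.5) = -3.5 eV/atom,  E(LiO)/atom = -1.0 eV/atom,
# so pd.get_e_above_hull(<LiO entry>) returns 2.5 eV/atom (up to
# floating-point rounding), while stable entries return 0.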
def get_equilibrium_reaction_energy(self, entry):
"""
Provides the reaction energy of a stable entry from the neighboring
equilibrium stable entries (also known as the inverse distance to
hull).
Args:
entry: A PDEntry like object
Returns:
Equilibrium reaction energy of entry. Stable entries should have
equilibrium reaction energy <= 0.
"""
if entry not in self.stable_entries:
raise ValueError("Equilibrium reaction energy is available only "
"for stable entries.")
if entry.is_element:
return 0
entries = [e for e in self.stable_entries if e != entry]
modpd = PhaseDiagram(entries, self.elements)
return modpd.get_decomp_and_e_above_hull(entry,
allow_negative=True)[1]
def get_composition_chempots(self, comp):
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
@deprecated(get_composition_chempots)
def get_facet_chempots(self, facet):
return self._get_facet_chempots(facet)
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with "
"elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=np.bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
def get_element_profile(self, element, comp, comp_tol=1e-5):
"""
Provides the element evolution data for a composition.
For example, can be used to analyze Li conversion voltages by varying
uLi and looking at the phases formed. Also can be used to analyze O2
evolution by varying uO2.
Args:
element: An element. Must be in the phase diagram.
comp: A Composition
comp_tol: The tolerance to use when calculating decompositions.
Phases with amounts less than this tolerance are excluded.
Defaults to 1e-5.
Returns:
Evolution data as a list of dictionaries of the following format:
[ {'chempot': -10.487582010000001, 'evolution': -2.0,
'reaction': Reaction Object], ...]
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with"
" elements in the phase diagram.")
gccomp = Composition({el: amt for el, amt in comp.items()
if el != element})
elref = self.el_refs[element]
elcomp = Composition(element.symbol)
evolution = []
for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
decomp_entries = self.get_decomposition(cc).keys()
decomp = [k.composition for k in decomp_entries]
rxn = Reaction([comp], decomp + [elcomp])
rxn.normalize_to(comp)
c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
evolution.append({'chempot': c,
'evolution': amt,
'element_reference': elref,
'reaction': rxn, 'entries': decomp_entries})
return evolution
def get_chempot_range_map(self, elements, referenced=True, joggle=True):
"""
Returns a chemical potential range map for each stable entry.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges
of all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: If True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
"""
all_chempots = []
pd = self
facets = pd.facets
for facet in facets:
chempots = self._get_facet_chempots(facet)
all_chempots.append([chempots[el] for el in pd.elements])
inds = [pd.elements.index(el) for el in elements]
el_energies = {el: 0.0 for el in elements}
if referenced:
el_energies = {el: pd.el_refs[el].energy_per_atom
for el in elements}
chempot_ranges = collections.defaultdict(list)
vertices = [list(range(len(self.elements)))]
if len(all_chempots) > len(self.elements):
vertices = get_facets(all_chempots, joggle=joggle)
for ufacet in vertices:
for combi in itertools.combinations(ufacet, 2):
data1 = facets[combi[0]]
data2 = facets[combi[1]]
common_ent_ind = set(data1).intersection(set(data2))
if len(common_ent_ind) == len(elements):
common_entries = [pd.qhull_entries[i]
for i in common_ent_ind]
data = np.array([[all_chempots[i][j]
- el_energies[pd.elements[j]]
for j in inds] for i in combi])
sim = Simplex(data)
for entry in common_entries:
chempot_ranges[entry].append(sim)
return chempot_ranges
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
"""
Returns a set of chemical potentials corresponding to the vertices of the simplex
in the chemical potential phase diagram.
The simplex is built using all elements in the target_composition except dep_elt.
The chemical potential of dep_elt is computed from the target composition energy.
This method is useful to get the limiting conditions for
defect computations, for instance.
Args:
target_comp: A Composition object
dep_elt: the element for which the chemical potential is computed from the energy of
the stable phase at the target composition
tol_en: a tolerance on the energy to set
Returns:
[{Element:mu}]: An array of conditions on simplex vertices for
which each element has a chemical potential set to a given
value. "absolute" values (i.e., not referenced to element energies)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != dep_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != dep_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
elts = [e for e in self.elements if e != dep_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = v[i] + muref[i]
res[dep_elt] = (np.dot(v + muref, coeff) + ef) / \
target_comp[dep_elt]
already_in = False
for di in all_coords:
dict_equals = True
for k in di:
if abs(di[k] - res[k]) > tol_en:
dict_equals = False
break
if dict_equals:
already_in = True
break
if not already_in:
all_coords.append(res)
return all_coords
def get_chempot_range_stability_phase(self, target_comp, open_elt):
"""
Returns a set of chemical potentials corresponding to the max and min
chemical potential of the open element for a given composition. It is
quite common to have, for instance, a ternary oxide (e.g., ABO3) for
which you want to know which A and B chemical potentials lead
to the highest and lowest oxygen chemical potential (reducing and
oxidizing conditions). This is useful for defect computations.
Args:
target_comp: A Composition object
open_elt: Element that you want to constrain to be max or min
Returns:
{Element:(mu_min,mu_max)}: Chemical potentials are given in
"absolute" values (i.e., not referenced to 0)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != open_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != open_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != open_elt]
max_open = -float('inf')
min_open = float('inf')
max_mus = None
min_mus = None
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[open_elt] / target_comp[open_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
all_coords.append(v)
if (np.dot(v + muref, coeff) + ef) / target_comp[
open_elt] > max_open:
max_open = (np.dot(v + muref, coeff) + ef) / \
target_comp[open_elt]
max_mus = v
if (np.dot(v + muref, coeff) + ef) / target_comp[
open_elt] < min_open:
min_open = (np.dot(v + muref, coeff) + ef) / \
target_comp[open_elt]
min_mus = v
elts = [e for e in self.elements if e != open_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
res[open_elt] = (min_open, max_open)
return res
class GrandPotentialPhaseDiagram(PhaseDiagram):
"""
A class representing a Grand potential phase diagram. Grand potential phase
diagrams are essentially phase diagrams that are open to one or more
components. To construct such phase diagrams, the relevant free energy is
the grand potential, which can be written as the Legendre transform of the
Gibbs free energy as follows
Grand potential = G - u_X N_X
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
"""
def __init__(self, entries, chempots, elements=None):
"""
Standard constructor for grand potential phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
chempots {Element: float}: Specify the chemical potentials
of the open elements.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
elements = set(elements).difference(self.chempots.keys())
all_entries = []
for e in entries:
if len(set(e.composition.elements).intersection(set(elements))) > 0:
all_entries.append(GrandPotPDEntry(e, self.chempots))
super(GrandPotentialPhaseDiagram, self).__init__(all_entries, elements)
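# Illustrative sketch (not part of the original source; the chemical
# potential is made up): opening the hypothetical Li-O entries from the
# PhaseDiagram sketch above to oxygen. Entries are wrapped in GrandPotPDEntry
# and only the closed element (Li) spans the resulting diagram.
# >>> gppd = GrandPotentialPhaseDiagram(entries, {Element("O"): -5.0})
# >>> print(gppd)  # lists the phases stable at mu_O = -5.0 eV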
def __str__(self):
output = []
chemsys = "-".join([el.symbol for el in self.elements])
output.append("{} grand potential phase diagram with ".format(chemsys))
output[-1] += ", ".join(["u{}={}".format(el, v)
for el, v in self.chempots.items()])
output.append("{} stable phases: ".format(len(self.stable_entries)))
output.append(", ".join([entry.name
for entry in self.stable_entries]))
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"chempots": self.chempots,
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = MontyDecoder().process_decoded(d["all_entries"])
elements = MontyDecoder().process_decoded(d["elements"])
return cls(entries, d["chempots"], elements)
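# A hedged construction sketch for the class above (``entries`` and the
# Element import are assumed to exist; the chemical potential value is
# illustrative only):
#
# open_pd = GrandPotentialPhaseDiagram(entries, {Element("O"): -6.0})
# print(open_pd.stable_entries)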
class CompoundPhaseDiagram(PhaseDiagram):
"""
Generates phase diagrams from compounds as terminations instead of
elements.
"""
# Tolerance for determining if amount of a composition is positive.
amount_tol = 1e-5
def __init__(self, entries, terminal_compositions,
normalize_terminal_compositions=True):
"""
Initializes a CompoundPhaseDiagram.
Args:
entries ([PDEntry]): Sequence of input entries. For example,
if you want a Li2O-P2O5 phase diagram, you might have all
Li-P-O entries as an input.
terminal_compositions ([Composition]): Terminal compositions of
phase space. In the Li2O-P2O5 example, these will be the
Li2O and P2O5 compositions.
normalize_terminal_compositions (bool): Whether to normalize the
terminal compositions to a per atom basis. If normalized,
the energy above hulls will be consistent
for comparison across systems. Non-normalized terminals are
more intuitive in terms of compositional breakdowns.
"""
self.original_entries = entries
self.terminal_compositions = terminal_compositions
self.normalize_terminals = normalize_terminal_compositions
(pentries, species_mapping) = \
self.transform_entries(entries, terminal_compositions)
self.species_mapping = species_mapping
super(CompoundPhaseDiagram, self).__init__(
pentries, elements=species_mapping.values())
def transform_entries(self, entries, terminal_compositions):
"""
Method to transform all entries to the composition coordinate in the
terminal compositions. If the entry does not fall within the space
defined by the terminal compositions, they are excluded. For example,
Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
compositions are represented by DummySpecies.
Args:
entries: Sequence of all input entries
terminal_compositions: Terminal compositions of phase space.
Returns:
Sequence of TransformedPDEntries falling within the phase space.
"""
new_entries = []
if self.normalize_terminals:
fractional_comp = [c.fractional_composition
for c in terminal_compositions]
else:
fractional_comp = terminal_compositions
# Map terminal compositions to unique dummy species.
sp_mapping = collections.OrderedDict()
for i, comp in enumerate(fractional_comp):
sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
for entry in entries:
try:
rxn = Reaction(fractional_comp, [entry.composition])
rxn.normalize_to(entry.composition)
# We only allow reactions that have positive amounts of
# reactants.
if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
for comp in fractional_comp]):
newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
for comp in fractional_comp}
newcomp = {k: v for k, v in newcomp.items()
if v > CompoundPhaseDiagram.amount_tol}
transformed_entry = \
TransformedPDEntry(Composition(newcomp), entry)
new_entries.append(transformed_entry)
except ReactionError:
# If the reaction can't be balanced, the entry does not fall
# into the phase space. We ignore them.
pass
return new_entries, sp_mapping
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"original_entries": [e.as_dict() for e in self.original_entries],
"terminal_compositions": [c.as_dict()
for c in self.terminal_compositions],
"normalize_terminal_compositions":
self.normalize_terminals}
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
entries = dec.process_decoded(d["original_entries"])
terminal_compositions = dec.process_decoded(d["terminal_compositions"])
return cls(entries, terminal_compositions,
d["normalize_terminal_compositions"])
class PhaseDiagramError(Exception):
"""
An exception class for Phase Diagram generation.
"""
pass
def get_facets(qhull_data, joggle=False):
"""
Get the simplex facets for the Convex hull.
Args:
qhull_data (np.ndarray): The data from which to construct the convex
hull as a Nxd array (N being number of data points and d being the
dimension)
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
List of simplices of the Convex Hull.
"""
if joggle:
return ConvexHull(qhull_data, qhull_options="QJ i").simplices
else:
return ConvexHull(qhull_data, qhull_options="Qt i").simplices
class PDAnalyzer(object):
@deprecated(PhaseDiagram,
"All PDAnalyzer methods have been moved to PhaseDiagram itself."
" E.g., PDAnalyzer.get_e_above_hull is now simply "
"PhaseDiagram.get_e_above_hull. This stub will be removed in "
"pymatgen 2018.1.1")
def __init__(self, pd):
"""
Initializes analyzer with a PhaseDiagram.
Args:
pd: Phase Diagram to analyze.
"""
self._pd = pd
def __getattr__(self, item):
return getattr(self._pd, item)
class PDPlotter(object):
"""
A plotter class for phase diagrams.
Args:
phasediagram: PhaseDiagram object.
show_unstable (float): Whether unstable phases will be plotted (as
red crosses). If a number > 0 is entered, all phases with
e_above_hull < show_unstable (in eV/atom) will be shown.
\\*\\*plotkwargs: Keyword args passed to matplotlib.pyplot.plot. Can
be used to customize markers etc. If not set, the default is
{
"markerfacecolor": (0.2157, 0.4941, 0.7216),
"markersize": 10,
"linewidth": 3
}
"""
def __init__(self, phasediagram, show_unstable=0, **plotkwargs):
# note: palettable imports matplotlib
from palettable.colorbrewer.qualitative import Set1_3
self._pd = phasediagram
self._dim = len(self._pd.elements)
if self._dim > 4:
raise ValueError("Only 1-4 components supported!")
self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
[[self._pd.facets[0][0], self._pd.facets[0][0]]]
self.show_unstable = show_unstable
colors = Set1_3.mpl_colors
self.plotkwargs = plotkwargs or {
"markerfacecolor": colors[2],
"markersize": 10,
"linewidth": 3
}
@property
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False):
if self._dim < 4:
plt = self._get_2d_plot(label_stable, label_unstable, ordering,
energy_colormap,
process_attributes=process_attributes)
elif self._dim == 4:
plt = self._get_3d_plot(label_stable)
return plt
def show(self, *args, **kwargs):
"""
Draws the phase diagram using Matplotlib and show it.
Args:
\\*args: Passed to get_plot.
\\*\\*kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def _get_2d_plot(self, label_stable=True, label_unstable=True,
ordering=None, energy_colormap=None, vmin_mev=-60.0,
vmax_mev=60.0, show_colorbar=True,
process_attributes=False):
"""
Builds and returns the 2D plot. Matplotlib is imported inside this
method because it is a fairly expensive library to load and not all
machines have it installed.
"""
plt = pretty_plot(8, 6)
from matplotlib.font_manager import FontProperties
if ordering is None:
(lines, labels, unstable) = self.pd_plot_data
else:
(_lines, _labels, _unstable) = self.pd_plot_data
(lines, labels, unstable) = order_phase_diagram(
_lines, _labels, _unstable, ordering)
if energy_colormap is None:
if process_attributes:
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
# One should think about a clever way to have "complex"
# attributes with complex processing options but with a clear
# logic. At this moment, I just use the attributes to know
# whether an entry is a new compound or an existing (from the
# ICSD or from the MP) one.
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "ko", **self.plotkwargs)
else:
plt.plot(x, y, "k*", **self.plotkwargs)
else:
for x, y in lines:
plt.plot(x, y, "ko-", **self.plotkwargs)
else:
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
for x, y in lines:
plt.plot(x, y, "k-", markeredgecolor="k")
vmin = vmin_mev / 1000.0
vmax = vmax_mev / 1000.0
if energy_colormap == 'default':
mid = - vmin / (vmax - vmin)
cmap = LinearSegmentedColormap.from_list(
'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
(mid, '#FFAAAA'), (1.0, '#FF0000')])
else:
cmap = energy_colormap
norm = Normalize(vmin=vmin, vmax=vmax)
_map = ScalarMappable(norm=norm, cmap=cmap)
_energies = [self._pd.get_equilibrium_reaction_energy(entry)
for coord, entry in labels.items()]
energies = [en if en < 0.0 else -0.00000001 for en in _energies]
vals_stable = _map.to_rgba(energies)
ii = 0
if process_attributes:
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=12)
else:
plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
markersize=18)
ii += 1
else:
for x, y in labels.keys():
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=15)
ii += 1
font = FontProperties()
font.set_weight("bold")
font.set_size(24)
# Sets a nice layout depending on the type of PD. Also defines a
# "center" for the PD, which then allows the annotations to be spread
# out in a nice manner.
if len(self._pd.elements) == 3:
plt.axis("equal")
plt.xlim((-0.1, 1.2))
plt.ylim((-0.1, 1.0))
plt.axis("off")
center = (0.5, math.sqrt(3) / 6)
else:
all_coords = labels.keys()
miny = min([c[1] for c in all_coords])
ybuffer = max(abs(miny) * 0.1, 0.1)
plt.xlim((-0.1, 1.1))
plt.ylim((miny - ybuffer, ybuffer))
center = (0.5, miny / 2)
plt.xlabel("Fraction", fontsize=28, fontweight='bold')
plt.ylabel("Formation energy (eV/fu)", fontsize=28,
fontweight='bold')
for coords in sorted(labels.keys(), key=lambda x: -x[1]):
entry = labels[coords]
label = entry.name
# The following defines an offset for the annotation text emanating
# from the center of the PD. Results in fairly nice layouts for the
# most part.
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
else vec
valign = "bottom" if vec[1] > 0 else "top"
if vec[0] < -0.01:
halign = "right"
elif vec[0] > 0.01:
halign = "left"
else:
halign = "center"
if label_stable:
if process_attributes and entry.attribute == 'new':
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font,
color='g')
else:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font)
if self.show_unstable:
font = FontProperties()
font.set_size(16)
energies_unstable = [self._pd.get_e_above_hull(entry)
for entry, coord in unstable.items()]
if energy_colormap is not None:
energies.extend(energies_unstable)
vals_unstable = _map.to_rgba(energies_unstable)
ii = 0
for entry, coords in unstable.items():
ehull = self._pd.get_e_above_hull(entry)
if ehull < self.show_unstable:
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 \
if np.linalg.norm(vec) != 0 else vec
label = entry.name
if energy_colormap is None:
plt.plot(coords[0], coords[1], "ks", linewidth=3,
markeredgecolor="k", markerfacecolor="r",
markersize=8)
else:
plt.plot(coords[0], coords[1], "s", linewidth=3,
markeredgecolor="k",
markerfacecolor=vals_unstable[ii],
markersize=8)
if label_unstable:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign, color="b",
verticalalignment=valign,
fontproperties=font)
ii += 1
if energy_colormap is not None and show_colorbar:
_map.set_array(energies)
cbar = plt.colorbar(_map)
cbar.set_label(
'Energy [meV/at] above hull (in red)\nInverse energy ['
'meV/at] above hull (in green)',
rotation=-90, ha='left', va='center')
ticks = cbar.ax.get_yticklabels()
# cbar.ax.set_yticklabels(['${v}$'.format(
# v=float(t.get_text().strip('$'))*1000.0) for t in ticks])
f = plt.gcf()
f.set_size_inches((8, 6))
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
return plt
def _get_3d_plot(self, label_stable=True):
"""
Builds and returns the 3D plot. Matplotlib is imported inside this
method because it is a fairly expensive library to load and not all
machines have it installed.
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(20)
(lines, labels, unstable) = self.pd_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = entry.name
if label_stable:
if len(entry.composition.elements) == 1:
ax.text(coords[0], coords[1], coords[2], label)
else:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(count, latexify(label)))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
ax.axis("off")
return plt
def write_image(self, stream, image_format="svg", **kwargs):
"""
Writes the phase diagram to an image in a stream.
Args:
stream:
stream to write to. Can be a file stream or a StringIO stream.
image_format:
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
\\*\\*kwargs: Pass through to the get_plot function.
"""
plt = self.get_plot(**kwargs)
f = plt.gcf()
f.set_size_inches((12, 10))
plt.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
"""
Plot the chemical potential range map. Currently works only for
3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
"""
self.get_chempot_range_map_plot(elements, referenced=referenced).show()
def get_chempot_range_map_plot(self, elements, referenced=True):
"""
Returns a plot of the chemical potential range map. Currently works
only for 3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(12, 8)
chempot_ranges = self._pd.get_chempot_range_map(
elements, referenced=referenced)
missing_lines = {}
excluded_region = []
for entry, lines in chempot_ranges.items():
comp = entry.composition
center_x = 0
center_y = 0
coords = []
contain_zero = any([comp.get_atomic_fraction(el) == 0
for el in elements])
is_boundary = (not contain_zero) and \
sum([comp.get_atomic_fraction(el) for el in
elements]) == 1
for line in lines:
(x, y) = line.coords.transpose()
plt.plot(x, y, "k-")
for coord in line.coords:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
center_x += coord[0]
center_y += coord[1]
if is_boundary:
excluded_region.extend(line.coords)
if coords and contain_zero:
missing_lines[entry] = coords
else:
xy = (center_x / len(coords), center_y / len(coords))
plt.annotate(latexify(entry.name), xy, fontsize=22)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Shade the forbidden chemical potential regions.
excluded_region.append([xlim[1], ylim[1]])
excluded_region = sorted(excluded_region, key=lambda c: c[0])
(x, y) = np.transpose(excluded_region)
plt.fill(x, y, "0.80")
# The hull does not generate the missing horizontal and vertical lines.
# The following code fixes this.
el0 = elements[0]
el1 = elements[1]
for entry, coords in missing_lines.items():
center_x = sum([c[0] for c in coords])
center_y = sum([c[1] for c in coords])
comp = entry.composition
is_x = comp.get_atomic_fraction(el0) < 0.01
is_y = comp.get_atomic_fraction(el1) < 0.01
n = len(coords)
if not (is_x and is_y):
if is_x:
coords = sorted(coords, key=lambda c: c[1])
for i in [0, -1]:
x = [min(xlim), coords[i][0]]
y = [coords[i][1], coords[i][1]]
plt.plot(x, y, "k")
center_x += min(xlim)
center_y += coords[i][1]
elif is_y:
coords = sorted(coords, key=lambda c: c[0])
for i in [0, -1]:
x = [coords[i][0], coords[i][0]]
y = [coords[i][1], min(ylim)]
plt.plot(x, y, "k")
center_x += coords[i][0]
center_y += min(ylim)
xy = (center_x / (n + 2), center_y / (n + 2))
else:
center_x = sum(coord[0] for coord in coords) + xlim[0]
center_y = sum(coord[1] for coord in coords) + ylim[0]
xy = (center_x / (n + 1), center_y / (n + 1))
plt.annotate(latexify(entry.name), xy,
horizontalalignment="center",
verticalalignment="center", fontsize=22)
plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el0.symbol))
plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el1.symbol))
plt.tight_layout()
return plt
def get_contour_pd_plot(self):
"""
Plot a contour phase diagram plot, where phase triangles are colored
according to degree of instability by interpolation. Currently only
works for 3-component phase diagrams.
Returns:
A matplotlib plot object.
"""
from scipy import interpolate
from matplotlib import cm
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
plt = self._get_2d_plot()
data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
for i, e in enumerate(entries):
data[i, 2] = self._pd.get_e_above_hull(e)
gridsize = 0.005
xnew = np.arange(0, 1., gridsize)
ynew = np.arange(0, 1, gridsize)
f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
znew = np.zeros((len(ynew), len(xnew)))
for (i, xval) in enumerate(xnew):
for (j, yval) in enumerate(ynew):
znew[j, i] = f(xval, yval)
plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
plt.colorbar()
return plt
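# A hedged usage sketch tying the plotter together (``pd`` is assumed to be a
# 3-component PhaseDiagram built elsewhere):
#
# plotter = PDPlotter(pd, show_unstable=0.05)
# plotter.show()  # builds get_plot() and displays it
# plotter.write_image("pd.svg", image_format="svg")
# plotter.get_contour_pd_plot()  # instability heat map (3 components only)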
def uniquelines(q):
"""
Given all the facets, convert it into a set of unique lines. Specifically
used for converting convex hull facets into line pairs of coordinates.
Args:
q: A 2-dim sequence, where each row represents a facet. E.g.,
[[1,2,3],[3,6,7],...]
Returns:
setoflines:
A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
"""
setoflines = set()
for facets in q:
for line in itertools.combinations(facets, 2):
setoflines.add(tuple(sorted(line)))
return setoflines
def triangular_coord(coord):
"""
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
coord: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
"""
unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
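# Each 2D point (x, y) is mapped to x*(1, 0) + y*(0.5, sqrt(3)/2), i.e. the
# second composition axis is sheared onto the 60-degree edge of an
# equilateral triangle; tet_coord below plays the same role in 3D.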
result = np.dot(np.array(coord), unitvec)
return result.transpose()
def tet_coord(coord):
"""
Convert a 3D coordinate into a tetrahedron based coordinate system for a
prettier phase diagram.
Args:
coord: coordinate used in the convex hull computation.
Returns:
coordinates in a tetrahedron-based coordinate system.
"""
unitvec = np.array([[1, 0, 0], [0.5, math.sqrt(3) / 2, 0],
[0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
"""
Orders the entries (their coordinates) in a phase diagram plot according
to the user specified ordering.
Ordering should be given as ['Up', 'Left', 'Right'], where Up,
Left and Right are the names of the entries in the upper, left and right
corners of the triangle respectively.
Args:
lines: list of list of coordinates for lines in the PD.
stable_entries: {coordinate : entry} for each stable node in the
phase diagram. (Each coordinate can only have one stable phase)
unstable_entries: {entry: coordinates} for all unstable nodes in the
phase diagram.
ordering: Ordering of the phase diagram, given as a list ['Up',
'Left','Right']
Returns:
(newlines, newstable_entries, newunstable_entries):
- newlines is a list of list of coordinates for lines in the PD.
- newstable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- newunstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
yup = -1000.0
xleft = 1000.0
xright = -1000.0
for coord in stable_entries:
if coord[0] > xright:
xright = coord[0]
nameright = stable_entries[coord].name
if coord[0] < xleft:
xleft = coord[0]
nameleft = stable_entries[coord].name
if coord[1] > yup:
yup = coord[1]
nameup = stable_entries[coord].name
if (not nameup in ordering) or (not nameright in ordering) or \
(not nameleft in ordering):
raise ValueError(
'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
'right}"'
' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
right=nameright,
ord=ordering))
cc = np.array([0.5, np.sqrt(3.0) / 6.0], np.float)
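# cc is the centroid of the Gibbs triangle with vertices (0, 0), (1, 0) and
# (0.5, sqrt(3)/2); the branches below mirror the coordinates or rotate them
# by 120 or 240 degrees about this point to match the requested ordering.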
if nameup == ordering[0]:
if nameleft == ordering[1]:
# The coordinates were already in the user ordering
return lines, stable_entries, unstable_entries
else:
newlines = [[np.array(1.0 - x), y] for x, y in lines]
newstable_entries = {(1.0 - c[0], c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (1.0 - c[0], c[1])
for entry, c in
unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[1]:
if nameleft == ordering[2]:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0
newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[2]:
if nameleft == ordering[0]:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c240 * xx - s240 * y[ii]
newy[ii] = -s240 * xx + c240 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
| mit |
herilalaina/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
fabioticconi/scikit-learn | examples/neighbors/plot_classification.py | 58 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.3/tutorials/building_a_system.py | 2 | 7267 | #!/usr/bin/env python
# coding: utf-8
# Advanced: Building a System
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.Bundle()
# Default Systems
# ------------------------
#
# Although the default empty Bundle doesn't include a system, there are available
# constructors that create default systems. To create a simple binary with component tags
# 'binary', 'primary', and 'secondary' (as above), you could call [default_binary](../api/phoebe.frontend.bundle.Bundle.default_binary.md):
# In[3]:
b = phoebe.Bundle.default_binary()
# or for short:
# In[4]:
b = phoebe.default_binary()
# In[5]:
print(b.hierarchy)
# To build the same binary but as a contact system, you would call:
# In[6]:
b = phoebe.default_binary(contact_binary=True)
# In[7]:
print(b.hierarchy)
# For more details on dealing with contact binary systems, see the [Contact Binary Hierarchy Tutorial](contact_binary_hierarchy.ipynb) and the [Contact Binary Example Script](../examples/minimal_contact_binary.ipynb).
# Adding Components Manually
# --------------------
#
# **IMPORTANT**: in the vast majority of cases, starting with one of the default systems is sufficient. Below we will discuss the alternative method of building a system from scratch.
#
# By default, an empty [Bundle](../api/phoebe.frontend.bundle.Bundle.md) does not contain any information about our system.
#
# So, let's first start by adding a few stars. Here we'll call the generic [add_component](../api/phoebe.frontend.bundle.Bundle.add_component.md) method. This method works for any type of component in the system - stars, orbits, planets, disks, rings, spots, etc. The first argument needs to be a callable or the name of a callable in [phoebe.parameters.component](../api/phoebe.parameters.component.md) which includes the following options:
#
# * orbit
# * star
# * envelope
#
# add_component also takes a keyword argument for the 'component' tag. Here we'll give them component tags 'primary' and 'secondary' - but note that these are merely convenience labels and do not hold any special roles. Some tags, however, are forbidden if they clash with other tags or reserved values - so if you get an error stating the component tag is forbidden, try using a different string.
# In[8]:
b = phoebe.Bundle()
# In[9]:
b.add_component(phoebe.component.star, component='primary')
b.add_component('star', component='secondary')
# But there are also shortcut methods for [add_star](../api/phoebe.frontend.bundle.Bundle.add_star.md) and [add_orbit](../api/phoebe.frontend.bundle.Bundle.add_orbit.md). In these cases you don't need to provide the function, but only the component tag of your star/orbit.
#
# Any of these functions also accept values for any of the qualifiers of the created parameters.
# In[10]:
b.add_star('extrastarforfun', teff=6000)
# Here we call the add_component method of the bundle with several arguments:
#
# * a function (or the name of a function) in phoebe.parameters.component. This
# function tells the bundle what parameters need to be added.
# * component: the tag that we want to give this component for future reference.
# * any additional keyword arguments: you can also provide initial values for Parameters
# that you know will be created. In the last example you can see that the
# effective temperature will already be set to 6000 (in default units which is K).
#
# and then we'll do the same to add an orbit:
# In[11]:
b.add_orbit('binary')
# ## Defining the Hierarchy
#
#
# At this point all we've done is add a bunch of Parameters to our Bundle, but
# we still need to specify the hierarchical setup of our system.
#
# Here we want to place our two stars (with component tags 'primary' and 'secondary') in our
# orbit (with component tag 'binary'). This can be done with several different syntaxes sent to [b.set_hierarchy](../api/phoebe.frontend.bundle.Bundle.set_hierarchy.md):
# In[12]:
b.set_hierarchy(phoebe.hierarchy.binaryorbit, b['binary'], b['primary'], b['secondary'])
# or
# In[13]:
b.set_hierarchy(phoebe.hierarchy.binaryorbit(b['binary'], b['primary'], b['secondary']))
# If you access the value that this set via [get_hierarchy](../api/phoebe.frontend.bundle.Bundle.get_hierarchy.md), you'll see that it really just resulted
# in a simple string representation:
# In[14]:
b.get_hierarchy()
# We could just as easily have used this string to set the hierarchy:
# In[15]:
b.set_hierarchy('orbit:binary(star:primary, star:secondary)')
# If at any point we want to flip the primary and secondary components or make
# this binary a triple, it's seriously as easy as changing this hierarchy and
# everything else will adjust as needed (including cross-ParameterSet constraints
# and datasets), as the sketch below illustrates.
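# For example, a minimal sketch (not executed as part of this tutorial, since
# later cells assume the original ordering): swapping the two stars is just
# another call to set_hierarchy with the component tags reversed.
# b.set_hierarchy('orbit:binary(star:secondary, star:primary)')
# b.set_hierarchy('orbit:binary(star:primary, star:secondary)')  # restore the original ordering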
# The Hierarchy Parameter
# -----------------------------
#
# Setting the hierarchy just sets the value of a single parameter (although it may take some time because it also does a lot of paperwork and manages constraints between components in the system). You can access that parameter as usual:
# In[16]:
b['hierarchy@system']
# or through any of these shortcuts:
# In[17]:
b.get_hierarchy()
# In[18]:
b.hierarchy
# This [HierarchyParameter](../api/phoebe.parameters.HierarchyParameter.md) then has several methods unique to itself. You can, for instance, list the component tags of all the stars or orbits in the hierarchy via [get_stars](../api/phoebe.parameters.HierarchyParameter.get_stars.md) or [get_orbits](../api/phoebe.parameters.HierarchyParameter.get_orbits.md), respectively:
# In[19]:
print(b.hierarchy.get_stars())
# In[20]:
print(b.hierarchy.get_orbits())
# Or you can ask for the component tag of the top-level item in the hierarchy via [get_top](../api/phoebe.parameters.HierarchyParameter.get_top.md).
# In[21]:
print(b.hierarchy.get_top())
# And request the parent, children, child, or sibling of any item in the hierarchy via [get_parent_of](../api/phoebe.parameters.HierarchyParameter.get_parent_of.md), [get_children_of](../api/phoebe.parameters.HierarchyParameter.get_children_of.md), or [get_sibling_of](../api/phoebe.parameters.HierarchyParameter.get_sibling_of.md).
# In[22]:
print(b.hierarchy.get_parent_of('primary'))
# In[23]:
print(b.hierarchy.get_children_of('binary'))
# In[24]:
print(b.hierarchy.get_child_of('binary', 0)) # here 0 means primary component, 1 means secondary
# In[25]:
print(b.hierarchy.get_sibling_of('primary'))
# We can also check whether a given component (by component tag) is the primary or secondary component in its parent orbit via [get_primary_or_secondary](../api/phoebe.parameters.HierarchyParameter.get_primary_or_secondary.md). Note that here it's just a coincidence (although on purpose) that the component tag is also 'secondary'.
# In[26]:
print(b.hierarchy.get_primary_or_secondary('secondary'))
| gpl-3.0 |
IssamLaradji/scikit-learn | sklearn/externals/joblib/__init__.py | 10 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computation-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: efficiently persisting
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
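For instance, an illustrative round trip (reusing the array ``a`` from the
example above; the target path is arbitrary)::
>>> from sklearn.externals.joblib import dump, load # doctest: +SKIP
>>> dump(a, '/tmp/joblib/vander.pkl') # doctest: +SKIP
>>> a_reloaded = load('/tmp/joblib/vander.pkl') # doctest: +SKIP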
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
zehpunktbarron/iOSMAnalyzer | scripts/c2_create_edit_point.py | 1 | 4390 | # -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#title :All created and edited points
#description :This file creates a plot: How many points have been created or edited per month?
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pylab
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
# Establish the connection to the database using psycopg2
conn = psycopg2.connect(conn_string)
print "Connection to database was established succesfully"
except:
print "Connection to database failed"
###
### Execute SQL query (1)
###
# Create a cursor for the SQL query. We need two cursors, one for each query.
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
-- Created points over time
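-- generate_series yields one row per day between the first and last
-- valid_from date, so the LEFT JOIN keeps months without any newly
-- created nodes in the result (their count is simply 0).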
SELECT count(t.id)::int AS count
,date_trunc('month', s.day)::date AS month
FROM (
SELECT generate_series(min(valid_from)::date
,max(valid_from)::date
,interval '1 day'
)::date AS day
FROM hist_point t
) s
LEFT JOIN hist_point t ON t.valid_from::date = s.day AND t.version = 1
GROUP BY month
ORDER BY month;
""")
# Getting a list of tuples from the database-cursor (cur)
data_tuples = []
for row in cur:
data_tuples.append(row)
except:
print "Query could not be executed"
datatypes = [('col1', 'i4'), ('date', 'S20')]
data1 = np.array(data_tuples, dtype=datatypes)
col1_1 = data1['col1']
###
### Execute SQL query (2)
###
# With this new cursor method, SQL queries can be executed
cur2 = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur2.execute("""
-- Edited points over time
SELECT count(t.id)::int AS count
,date_trunc('month', s.day)::date AS month
FROM (
SELECT generate_series(min(valid_from)::date
,max(valid_from)::date
,interval '1 day'
)::date AS day
FROM hist_point t
) s
LEFT JOIN hist_point t ON t.valid_from::date = s.day AND NOT (t.version = 1) AND visible = 'true'
GROUP BY month
ORDER BY month;
""")
# Getting a list of tuples from the database-cursor (cur)
data_tuples2 = []
for row2 in cur2:
data_tuples2.append(row2)
except:
print "Query could not be executed"
datatypes2 = [('col1', 'i4'), ('date', 'S20')]
data2 = np.array(data_tuples2, dtype=datatypes2)
col1_2 = data2['col1']
###
### Plot (Multiline-Chart)
###
# Create Subplot
fig = plt.figure()
ax = fig.add_subplot(111)
# set figure size
fig.set_size_inches(10,6)
# Data-tuple and datatype
data1 = np.array(data_tuples, dtype=datatypes)
# Converts date to a manageable date-format for matplotlib
dates = mdates.num2date(mdates.datestr2num(data1['date']))
# Create line chart (x-axis: dates, y-axis: monthly counts)
plt.plot(dates, col1_1, color = '#2dd700', linewidth=2, label='Created Nodes')
plt.plot(dates, col1_2, color = '#ff6700', linewidth=2, label='Node-Edits')
# Place a gray dashed grid behind the thicks (only for y-axis)
ax.yaxis.grid(color='gray', linestyle='dashed')
# Set this grid behind the thicks
ax.set_axisbelow(True)
# Rotate x-labels on the x-axis
fig.autofmt_xdate()
# Label x and y axis
plt.xlabel('Date')
plt.ylabel('Number of created Nodes and Node-Edits')
# Locate legend on the plot (http://matplotlib.org/users/legend_guide.html#legend-location)
# Shrink current axis by 10% to make room for the legend
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height * 0.9])
# Put a legend to the right of the current axis and reduce the font size
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size':9})
# Plot-title
plt.title("Development of created Nodes and Node-Edits")
# Save plot to a *.jpeg file
plt.savefig('pics/c2_create_edit_point.jpeg')
plt.clf()
| gpl-3.0 |
yanlend/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
W : 2-ndarray, [n_samples, r]
Component part of the factorization
H : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
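    # Multiplicative update rules from Lee & Seung (eps guards against
    # division by zero):
    #   H <- H * (W^T V) / (W^T W H + eps)
    #   W <- W * (V H^T) / (W H H^T + eps)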
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
'benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
ghorn/debian-casadi | experimental/joris/vdp_single_shooting.py | 1 | 2985 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from casadi import *
import numpy as NP
import matplotlib.pyplot as plt
from casadi.tools import *
nk = 4 # Control discretization
tf = 10.0 # End time
# Declare variables (use scalar graph)
t = ssym("t") # time
u = ssym("u") # control
x = ssym("x",3) # state
xd = ssym("xd",3) # state derivative
# ODE right hand side
rhs = vertcat( [(1 - x[1]*x[1])*x[0] - x[1] + u, \
x[0], \
x[0]*x[0] + x[1]*x[1] + u*u] )
# DAE residual function
f = SXFunction([t,x,u,xd],[rhs-xd])
# Create an integrator
f_d = CVodesIntegrator(f)
f_d.setOption("abstol",1e-8) # tolerance
f_d.setOption("reltol",1e-8) # tolerance
f_d.setOption("steps_per_checkpoint",1000)
f_d.setOption("tf",tf/nk) # final time
f_d.init()
# All controls (use matrix graph)
U = msym("U",nk) # nk-by-1 symbolic variable
# The initial state (x_0=0, x_1=1, x_2=0)
X = msym([0,1,0])
# State derivative (only relevant for DAEs)
Xp = msym([0,0,0])
# Build a graph of integrator calls
for k in range(nk):
[X,Xp] = f_d.call([X,U[k],Xp])
dotsave(X,filename='single.pdf')
# Objective function: x_2(T)
F = MXFunction([U],[X[2]])
# Terminal constraints: x_0(T)=x_1(T)=0
X_01 = X[0:2] # first two components of X
G = MXFunction([U],[X_01])
# Allocate an NLP solver
solver = IpoptSolver(F,G)
solver.setOption("monitor",["eval_f"])
solver.init()
# Set bounds and initial guess
solver.setInput(-0.75*NP.ones(nk), "lbx")
solver.setInput(1.0*NP.ones(nk), "ubx")
solver.setInput(NP.zeros(nk),"x0")
solver.setInput(NP.zeros(2),"lbg")
solver.setInput(NP.zeros(2),"ubg")
# Solve the problem
solver.solve()
# Retrieve the solution
u_opt = NP.array(solver.getOutput("x"))
# Time grid
tgrid_x = NP.linspace(0,10,nk+1)
tgrid_u = NP.linspace(0,10,nk)
# Plot the results
plt.figure(1)
plt.clf()
plt.plot(tgrid_u,u_opt,'-.')
plt.title("Van der Pol optimization - single shooting")
plt.xlabel('time')
plt.legend(['u trajectory'])
plt.grid()
plt.show()
| lgpl-3.0 |
RomainBrault/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 86 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
The decision regions found by each individual classifier and by the
averaging `VotingClassifier` are shown side by side in a 2x2 grid of
subplots.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
yeatmanlab/BrainTools | projects/NLR_MEG/source_analysis_session2.py | 1 | 117065 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
#%%
import sys
import mne
import matplotlib.pyplot as plt
import imageio
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import scipy.io as sio
import time
from functools import partial
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne import set_config
import matplotlib.font_manager as font_manager
import csv
os.chdir('/home/sjjoo/git/BrainTools/projects/NLR_MEG')
from plotit import plotit
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
fs_dir = '/mnt/diskArray/projects/avg_fsurfer'
this_env['SUBJECTS_DIR'] = fs_dir
raw_dir = '/mnt/scratch/NLR_MEG3'
os.chdir(raw_dir)
subs = ['NLR_102_RS','NLR_110_HH','NLR_145_AC','NLR_150_MG',
'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_162_EF','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_201_GS',
'NLR_204_AM','NLR_205_AC','NLR_207_AH','NLR_210_SB','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_GB267','NLR_JB420', 'NLR_HB275','NLR_GB355']
session2 = ['102_rs160815','110_hh160809',
'145_ac160823','150_mg160825',
'152_tc160623','160_ek160915','161_ak160916','162_ef160829','163_lf160920',
'164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
'180_zd160826','201_gs150925',
'204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828',
'nlr_hb275170828','nlr_gb355170907']
twre_index1 = [87,66,84,92,
86,63,81,53,60,
55,71,63,68,67,
64,79,
59,84,91,76,57,
67,77,80,53,72,85]
twre_index2 = [90,76,94,115,
85,75,82,64,75,
63,83,77,84,75,
68,79,
62,90,105,75,71,
69,83,76,62,73,94]
twre_index1 = np.array(twre_index1)
twre_index2 = np.array(twre_index2)
#age1 = [125.6885, 132.9501, 122.0434, 138.4349, 97.6347, 138.1420, 108.2457, 98.0631, 105.8147, 89.9132,
# 87.6465, 131.8660, 123.7174, 95.959, 112.416, 133.8042, 152.4639, 103.4823, 89.8475, 138.4020,
# 93.8568, 117.0814, 123.6202, 122.9304, 109.1656, 90.6058,
# 111.9593,86.0381,147.2063,95.8699,148.0802,122.5896,88.7162,123.0495,110.6645,105.3069,88.9143,95.2879,106.2852,
# 122.2915,114.4389,136.1496,128.6246,137.9216,122.7528]
#age1 = np.divide(age1, 12)
n_subjects = len(subs)
c_table = ( (0.6510, 0.8078, 0.8902), # Blue, Green, Red, Orange, Purple, yellow
(0.1216, 0.4706, 0.7059),
(0.6980, 0.8745, 0.5412),
(0.2000, 0.6275, 0.1725),
(0.9843, 0.6039, 0.6000),
(0.8902, 0.1020, 0.1098),
(0.9922, 0.7490, 0.4353),
(1.0000, 0.4980, 0),
(0.7922, 0.6980, 0.8392),
(0.4157, 0.2392, 0.6039),
(1.0000, 1.0000, 0.6000),
(0.6941, 0.3490, 0.1569))
fname_data = op.join(raw_dir, 'session2_data_loose_depth8_normal.npy')
#%%
""" Some checks """
n = 0  # subject index for these checks; must be < len(session2) (27 sessions here)
os.chdir(os.path.join(raw_dir,session2[n]))
os.chdir('inverse')
fn = 'All_40-sss_eq_'+session2[n]+'-ave.fif'
evoked = mne.read_evokeds(fn, condition=0,
baseline=(None,0), kind='average', proj=True)
info = evoked.info
if os.path.isdir('../forward'):
os.chdir('../forward')
trans = session2[n] + '-trans.fif'
# Take a look at the sensors
mne.viz.plot_trans(info, trans, subject=subs[n], dig=True,
meg_sensors=True, subjects_dir=fs_dir)
os.chdir(os.path.join(raw_dir,session2[n]))
os.chdir('epochs')
epo = mne.read_epochs('All_40-sss_'+session2[n]+'-epo.fif',proj='delayed')
epo.average().plot(proj='interactive')
#%%
"""
Here we load the data
"""
# Session 2
os.chdir(raw_dir)
X13 = np.load(fname_data)
orig_times = np.load('session2_times.npy')
tstep = np.load('session2_tstep.npy')
n_epochs = np.load('session2_n_averages.npy')
tmin = -0.1
#m1 = np.logical_and(np.transpose(twre_index) >= 85, np.transpose(age) <= 13)
#m2 = np.logical_and(np.transpose(twre_index) < 85, np.transpose(age) <= 13)
##m4 = np.logical_and(np.transpose(twre_index) >= 80, np.transpose(twre_index) < 90)
#m3 = np.mean(n_epochs,axis=1) < 40
#m1[np.where(m3)] = False
#m2[np.where(m3)] = False
#
#good_readers = np.where(m1)[0]
#poor_readers = np.where(m2)[0]
##middle_readers = np.where(m4)[0]
#
#a1 = np.transpose(age) > 9
#a2 = np.logical_not(a1)
#
#old_readers = np.where(a1)[0]
#young_readers = np.where(a2)[0]
#
#all_subject = []
#all_subject.extend(good_readers)
#all_subject.extend(poor_readers)
##all_subject.extend(middle_readers)
#all_subject.sort()
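# The grouping block above is commented out and refers to variables (twre_index,
# age) that this script never defines, yet later cells use all_subject,
# good_readers and poor_readers. The lines below are an assumed reconstruction,
# splitting subjects on the session-2 TOWRE index with the same 85 cutoff used
# above; adjust if a different grouping criterion was intended.
all_subject = np.arange(n_subjects)
good_readers = np.where(twre_index2 >= 85)[0]
poor_readers = np.where(twre_index2 < 85)[0]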
fs_vertices = [np.arange(10242)] * 2
#%%
""" Downsample the data """
sample = np.arange(0,len(orig_times),2)
times = orig_times[sample]
tstep = 2*tstep
X11 = X13[:,sample,:,:]
del X13
X11 = np.abs(X11)
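# Keeping every other sample halves the effective sampling rate (hence tstep is
# doubled), and np.abs rectifies the signed, normal-orientation source estimates
# so that later ROI averages reflect response magnitude rather than polarity.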
#%%
""" Read HCP labels """
labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white', subjects_dir=fs_dir) #regexp=aparc_label_name
#aparc_label_name = 'PHT_ROI'#'_IP'#'IFSp_ROI'#'STSvp_ROI'#'STSdp_ROI'#'PH_ROI'#'TE2p_ROI' #'SFL_ROI' #'IFSp_ROI' #'TE2p_ROI' #'inferiortemporal' #'pericalcarine'
anat_label = mne.read_labels_from_annot('fsaverage', parc='aparc',surf_name='white',
subjects_dir=fs_dir) #, regexp=aparc_label_name)
#%%
#TE2p_mask_lh = mne.Label.get_vertices_used(TE2p_label[0])
#TE2p_mask_rh = mne.Label.get_vertices_used(TE2p_label[1])
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
TE2a_label_lh = [label for label in labels if label.name == 'L_TE2a_ROI-lh'][0]
TE2a_label_rh = [label for label in labels if label.name == 'R_TE2a_ROI-rh'][0]
TF_label_lh = [label for label in labels if label.name == 'L_TF_ROI-lh'][0]
TF_label_rh = [label for label in labels if label.name == 'R_TF_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
a8C_label_lh = [label for label in labels if label.name == 'L_8C_ROI-lh'][0]
a8C_label_rh = [label for label in labels if label.name == 'R_8C_ROI-rh'][0]
p946v_label_lh = [label for label in labels if label.name == 'L_p9-46v_ROI-lh'][0]
p946v_label_rh = [label for label in labels if label.name == 'R_p9-46v_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFSa_label_lh = [label for label in labels if label.name == 'L_IFSa_ROI-lh'][0]
IFSa_label_rh = [label for label in labels if label.name == 'R_IFSa_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
a43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
a43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
a9_46v_lh = [label for label in labels if label.name == 'L_a9-46v_ROI-lh'][0]
a9_46v_rh = [label for label in labels if label.name == 'R_a9-46v_ROI-rh'][0]
PGi_label_lh = [label for label in labels if label.name == 'L_PGi_ROI-lh'][0]
PGi_label_rh = [label for label in labels if label.name == 'R_PGi_ROI-rh'][0]
PGs_label_lh = [label for label in labels if label.name == 'L_PGs_ROI-lh'][0]
PGs_label_rh = [label for label in labels if label.name == 'R_PGs_ROI-rh'][0]
STSvp_label_lh = [label for label in labels if label.name == 'L_STSvp_ROI-lh'][0]
STSvp_label_rh = [label for label in labels if label.name == 'R_STSvp_ROI-rh'][0]
STSdp_label_lh = [label for label in labels if label.name == 'L_STSdp_ROI-lh'][0]
STSdp_label_rh = [label for label in labels if label.name == 'R_STSdp_ROI-rh'][0]
STSva_label_lh = [label for label in labels if label.name == 'L_STSva_ROI-lh'][0]
STSva_label_rh = [label for label in labels if label.name == 'R_STSva_ROI-rh'][0]
STSda_label_lh = [label for label in labels if label.name == 'L_STSda_ROI-lh'][0]
STSda_label_rh = [label for label in labels if label.name == 'R_STSda_ROI-rh'][0]
TPOJ1_label_lh = [label for label in labels if label.name == 'L_TPOJ1_ROI-lh'][0]
TPOJ1_label_rh = [label for label in labels if label.name == 'R_TPOJ1_ROI-rh'][0]
TPOJ2_label_lh = [label for label in labels if label.name == 'L_TPOJ2_ROI-lh'][0]
TPOJ2_label_rh = [label for label in labels if label.name == 'R_TPOJ2_ROI-rh'][0]
V1_label_lh = [label for label in labels if label.name == 'L_V1_ROI-lh'][0]
V1_label_rh = [label for label in labels if label.name == 'R_V1_ROI-rh'][0]
V4_label_lh = [label for label in labels if label.name == 'L_V4_ROI-lh'][0]
V4_label_rh = [label for label in labels if label.name == 'R_V4_ROI-rh'][0]
LIPd_label_lh = [label for label in labels if label.name == 'L_LIPd_ROI-lh'][0]
LIPd_label_rh = [label for label in labels if label.name == 'R_LIPd_ROI-rh'][0]
LIPv_label_lh = [label for label in labels if label.name == 'L_LIPv_ROI-lh'][0]
LIPv_label_rh = [label for label in labels if label.name == 'R_LIPv_ROI-rh'][0]
PBelt_label_lh = [label for label in labels if label.name == 'L_PBelt_ROI-lh'][0]
PBelt_label_rh = [label for label in labels if label.name == 'R_PBelt_ROI-rh'][0]
PSL_label_lh = [label for label in labels if label.name == 'L_PSL_ROI-lh'][0]
PSL_label_rh = [label for label in labels if label.name == 'R_PSL_ROI-rh'][0]
LBelt_label_lh = [label for label in labels if label.name == 'L_LBelt_ROI-lh'][0]
LBelt_label_rh = [label for label in labels if label.name == 'R_LBelt_ROI-rh'][0]
A1_label_lh = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
A1_label_rh = [label for label in labels if label.name == 'R_A1_ROI-rh'][0]
MBelt_label_lh = [label for label in labels if label.name == 'L_MBelt_ROI-lh'][0]
MBelt_label_rh = [label for label in labels if label.name == 'R_MBelt_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
A4_label_lh = [label for label in labels if label.name == 'L_A4_ROI-lh'][0]
A4_label_rh = [label for label in labels if label.name == 'R_A4_ROI-rh'][0]
PFcm_label_lh = [label for label in labels if label.name == 'L_PFcm_ROI-lh'][0]
PFcm_label_rh = [label for label in labels if label.name == 'R_PFcm_ROI-rh'][0]
PFm_label_lh = [label for label in labels if label.name == 'L_PFm_ROI-lh'][0]
PFm_label_rh = [label for label in labels if label.name == 'R_PFm_ROI-rh'][0]
_4_label_lh = [label for label in labels if label.name == 'L_4_ROI-lh'][0]
_4_label_rh = [label for label in labels if label.name == 'R_4_ROI-rh'][0]
_1_label_lh = [label for label in labels if label.name == 'L_1_ROI-lh'][0]
_1_label_rh = [label for label in labels if label.name == 'R_1_ROI-rh'][0]
_2_label_lh = [label for label in labels if label.name == 'L_2_ROI-lh'][0]
_2_label_rh = [label for label in labels if label.name == 'R_2_ROI-rh'][0]
_3a_label_lh = [label for label in labels if label.name == 'L_3a_ROI-lh'][0]
_3a_label_rh = [label for label in labels if label.name == 'R_3a_ROI-rh'][0]
_3b_label_lh = [label for label in labels if label.name == 'L_3b_ROI-lh'][0]
_3b_label_rh = [label for label in labels if label.name == 'R_3b_ROI-rh'][0]
_43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
_43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
_6r_label_lh = [label for label in labels if label.name == 'L_6r_ROI-lh'][0]
_6r_label_rh = [label for label in labels if label.name == 'R_6r_ROI-rh'][0]
OP1_label_lh = [label for label in labels if label.name == 'L_OP1_ROI-lh'][0]
OP1_label_rh = [label for label in labels if label.name == 'R_OP1_ROI-rh'][0]
OP23_label_lh = [label for label in labels if label.name == 'L_OP2-3_ROI-lh'][0]
OP23_label_rh = [label for label in labels if label.name == 'R_OP2-3_ROI-rh'][0]
OP4_label_lh = [label for label in labels if label.name == 'L_OP4_ROI-lh'][0]
OP4_label_rh = [label for label in labels if label.name == 'R_OP4_ROI-rh'][0]
PFop_label_lh = [label for label in labels if label.name == 'L_PFop_ROI-lh'][0]
PFop_label_rh = [label for label in labels if label.name == 'R_PFop_ROI-rh'][0]
A5_label_lh = [label for label in labels if label.name == 'L_A5_ROI-lh'][0]
A5_label_rh = [label for label in labels if label.name == 'R_A5_ROI-rh'][0]
STV_label_lh = [label for label in labels if label.name == 'L_STV_ROI-lh'][0]
STV_label_rh = [label for label in labels if label.name == 'R_STV_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
PF_label_lh = [label for label in labels if label.name == 'L_PF_ROI-lh'][0]
PF_label_rh = [label for label in labels if label.name == 'R_PF_ROI-rh'][0]
PFt_label_lh = [label for label in labels if label.name == 'L_PFt_ROI-lh'][0]
PFt_label_rh = [label for label in labels if label.name == 'R_PFt_ROI-rh'][0]
p47r_label_lh = [label for label in labels if label.name == 'L_p47r_ROI-lh'][0]
p47r_label_rh = [label for label in labels if label.name == 'R_p47r_ROI-rh'][0]
FOP5_label_lh = [label for label in labels if label.name == 'L_FOP5_ROI-lh'][0]
FOP5_label_rh = [label for label in labels if label.name == 'R_FOP5_ROI-rh'][0]
FOP4_label_lh = [label for label in labels if label.name == 'L_FOP4_ROI-lh'][0]
FOP4_label_rh = [label for label in labels if label.name == 'R_FOP4_ROI-rh'][0]
FOP3_label_lh = [label for label in labels if label.name == 'L_FOP3_ROI-lh'][0]
FOP3_label_rh = [label for label in labels if label.name == 'R_FOP3_ROI-rh'][0]
FOP2_label_lh = [label for label in labels if label.name == 'L_FOP2_ROI-lh'][0]
FOP2_label_rh = [label for label in labels if label.name == 'R_FOP2_ROI-rh'][0]
Ig_label_lh = [label for label in labels if label.name == 'L_Ig_ROI-lh'][0]
Ig_label_rh = [label for label in labels if label.name == 'R_Ig_ROI-rh'][0]
temp1_label_lh = [label for label in anat_label if label.name == 'parsopercularis-lh'][0]
temp1_label_rh = [label for label in anat_label if label.name == 'parsopercularis-rh'][0]
temp2_label_lh = [label for label in anat_label if label.name == 'parsorbitalis-lh'][0]
temp2_label_rh = [label for label in anat_label if label.name == 'parsorbitalis-rh'][0]
temp3_label_lh = [label for label in anat_label if label.name == 'parstriangularis-lh'][0]
temp3_label_rh = [label for label in anat_label if label.name == 'parstriangularis-rh'][0]
temp4_label_lh = [label for label in anat_label if label.name == 'precentral-lh'][0]
temp4_label_rh = [label for label in anat_label if label.name == 'precentral-rh'][0]
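# All of the lookups above repeat the same list comprehension. A small helper
# like this one (a convenience sketch; the original assignments above are left
# untouched) expresses the pattern once:
def get_hcp_label(name, hemi, label_list=labels):
    """Return the HCPMMP1 label named e.g. 'L_PHT_ROI-lh' for name='PHT', hemi='lh'."""
    prefix = 'L_' if hemi == 'lh' else 'R_'
    return [lab for lab in label_list if lab.name == '%s%s_ROI-%s' % (prefix, name, hemi)][0]
# Example: get_hcp_label('PHT', 'lh') is the same label as PHT_label_lh above.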
#%%
#new_data = X13[:,:,all_subject,:]
#data1 = np.subtract(np.mean(new_data[:,:,:,[5]],axis=3), np.mean(new_data[:,:,:,[0]],axis=3))
#data1 = np.mean(new_data[:,:,:,[5]],axis=3)
#del new_data
#lex_hC_lN = X13[:,:,:,5]
#dot_hC_lN = X13[:,:,:,0]
data11 = np.mean(X11[:,:,:,[5]],axis=3) - np.mean(X11[:,:,:,[8]],axis=3)
data11 = np.transpose(data11,[2,1,0])
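# data11 holds, per subject, the contrast between lexical-task conditions 5 and 8
# (plotted as low vs. high noise in later cells), transposed to
# (subjects x times x vertices) as the spatio-temporal cluster test expects.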
#%%
stat_fun = partial(mne.stats.ttest_1samp_no_p)
s_group = all_subject
p_threshold = 0.05
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(s_group) - 1)
#subjects_dir = mne.utils.get_subjects_dir(subjects_dir, raise_error=True)
#label_dir = op.join(fs_dir, 'fsaverage', 'label')
#lh = mne.read_label(op.join(label_dir, 'lh.Medial_wall.label'))
#rh = mne.read_label(op.join(label_dir, 'rh.Medial_wall.label'))
#medial_vertices = np.concatenate((lh.vertices[lh.vertices < 10242], rh.vertices[rh.vertices < 10242] + 10242))
#lex_hC_lN[medial_vertices,:,:] = 0
#if concatenate is True:
# return np.concatenate((lh.vertices[lh.vertices < 10242],
# rh.vertices[rh.vertices < 10242] + 10242))
#else:
# return [lh.vertices, rh.vertices]
#
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data11[s_group,:,:])), fs_vertices, tmin, tstep, subject='fsaverage')
brain3_1 = temp3.plot(hemi='lh', subjects_dir=fs_dir, views = 'lat', initial_time=0.40, #['lat','ven','med']
clim=dict(kind='value', lims=[1.5, t_threshold, 7])) #clim=dict(kind='value', lims=[2, t_threshold, 7]), size=(800,800))
#temp3 = mne.SourceEstimate(np.mean(np.abs(X11[:,:,:,5]),axis=2), fs_vertices, tmin, tstep, subject='fsaverage')
#
#brain3_1 = temp3.plot(hemi='lh', subjects_dir=fs_dir, views = ['lat','ven','med'], initial_time=0.26, #['lat','ven','med']
# clim=dict(kind='value', lims=[2, 2.5, 5])) #clim=dict(kind='value', lims=[2, t_threshold, 7]), size=(800,800))
brain3_1.save_movie('Lex_LH_free_depth8_all_Word_Normal.mp4',time_dilation = 6.0,framerate = 24)
"""
plot(self, subject=None, surface='inflated', hemi='lh', colormap='auto',
time_label='auto', smoothing_steps=10, transparent=None, alpha=1.0,
time_viewer=False, subjects_dir=None, figure=None, views='lat',
colorbar=True, clim='auto', cortex='classic', size=800, background='black',
foreground='white', initial_time=None, time_unit='s')
"""
#%%
""" Spatio-temporal clustering """
p_threshold = 0.05
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
s_space = mne.grade_to_tris(5)
# Left hemisphere
s_space_lh = s_space[s_space[:,0] < 10242]
connectivity = mne.spatial_tris_connectivity(s_space_lh, remap_vertices = True)
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.spatio_temporal_cluster_1samp_test(data11[:,:,0:10242], connectivity=connectivity, n_jobs=12,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
fsave_vertices = [np.arange(10242), np.array([], int)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
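# In the summary STC, the first "time point" pools all significant clusters and
# each vertex value is how long (in ms) that vertex remained significant, which
# is why the plots below use duration-based color limits and the ROI extraction
# zeroes vertices with data <= 40.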
#%%
brain1 = stc_all_cluster_vis.plot(
hemi='lh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=5, clim=dict(kind='value', lims=[20, 40, 300]))
#brain1.add_label(A4_label_lh, borders=True, color=c_table[2])
#brain1.add_label(A5_label_lh, borders=True, color=c_table[2])
#brain1.add_label(STSdp_label_lh, borders=True, color=c_table[2])
#brain1.add_label(TPOJ1_label_lh, borders=True, color=c_table[2])
#brain1.add_label(PBelt_label_lh, borders=True, color=c_table[2])
#brain1.add_label(LBelt_label_lh, borders=True, color=c_table[2])
brain1.add_label(A1_label_lh, borders=True, color='k')
temp_auditory_label_l = mne.Label(A4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A4_label_lh.pos,values= A4_label_lh.values) + \
mne.Label(A5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A5_label_lh.pos,values= A5_label_lh.values) + \
mne.Label(STSdp_label_lh.vertices, hemi='lh',name=u'sts_l',pos=STSdp_label_lh.pos,values= STSdp_label_lh.values)+ \
mne.Label(TPOJ1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=TPOJ1_label_lh.pos,values= TPOJ1_label_lh.values)+ \
mne.Label(PBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PBelt_label_lh.pos,values= PBelt_label_lh.values)+ \
mne.Label(LBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=LBelt_label_lh.pos,values= LBelt_label_lh.values)
brain1.add_label(temp_auditory_label_l, borders=True, color=c_table[2])
lh_label = stc_all_cluster_vis.in_label(temp_auditory_label_l)
data = lh_label.data
lh_label.data[data <= 40] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
aud_vertices_l = temp.vertices[0]
new_label = mne.Label(aud_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[2])
#brain1.add_label(PFcm_label_lh, borders=True, color=c_table[0])
#brain1.add_label(PFop_label_lh, borders=True, color=c_table[0])
#brain1.add_label(RI_label_lh, borders=True, color=c_table[0])
#brain1.add_label(PF_label_lh, borders=True, color=c_table[0])
#brain1.add_label(PFt_label_lh, borders=True, color=c_table[0])
temp_auditory2_label_l = mne.Label(PFcm_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PFcm_label_lh.pos,values= PFcm_label_lh.values) + \
mne.Label(PFop_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PFop_label_lh.pos,values= PFop_label_lh.values) + \
mne.Label(RI_label_lh.vertices, hemi='lh',name=u'sts_l',pos=RI_label_lh.pos,values= RI_label_lh.values)+ \
mne.Label(PF_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PF_label_lh.pos,values= PF_label_lh.values)+ \
mne.Label(PFt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PFt_label_lh.pos,values= PFt_label_lh.values)
brain1.add_label(temp_auditory2_label_l, borders=True, color=c_table[0])
lh_label = stc_all_cluster_vis.in_label(temp_auditory2_label_l)
data = lh_label.data
lh_label.data[data <= 40] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
aud2_vertices_l = temp.vertices[0]
new_label = mne.Label(aud2_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[0])
#brain1.add_label(_3a_label_lh, borders=True, color=c_table[4])
#brain1.add_label(_3b_label_lh, borders=True, color=c_table[4])
#brain1.add_label(_4_label_lh, borders=True, color=c_table[4])
temp_motor_label_l = mne.Label(_3a_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3a_label_lh.pos,values= _3a_label_lh.values) + \
mne.Label(_3b_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3b_label_lh.pos,values= _3b_label_lh.values) + \
mne.Label(_4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_4_label_lh.pos,values= _4_label_lh.values)
brain1.add_label(temp_motor_label_l, borders=True, color=c_table[4])
lh_label = stc_all_cluster_vis.in_label(temp_motor_label_l)
data = lh_label.data
lh_label.data[data <= 40] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
motor_vertices_l = temp.vertices[0]
new_label = mne.Label(motor_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[4])
#brain1.add_label(_6r_label_lh, borders=True, color=c_table[6])
#brain1.add_label(a44_label_lh, borders=True, color=c_table[6])
#brain1.add_label(a45_label_lh, borders=True, color=c_table[6])
temp_broca_label_l = mne.Label(_6r_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_6r_label_lh.pos,values= _6r_label_lh.values) + \
mne.Label(a44_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a44_label_lh.pos,values= a44_label_lh.values) + \
mne.Label(a45_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a45_label_lh.pos,values= a45_label_lh.values) + \
mne.Label(FOP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP4_label_lh.pos,values= FOP4_label_lh.values) + \
mne.Label(FOP3_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP3_label_lh.pos,values= FOP3_label_lh.values) + \
mne.Label(FOP5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP5_label_lh.pos,values= FOP5_label_lh.values)
brain1.add_label(temp_broca_label_l, borders=True, color=c_table[6])
lh_label = stc_all_cluster_vis.in_label(temp_broca_label_l)
data = lh_label.data
lh_label.data[data <= 40] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
broca_vertices_l = temp.vertices[0]
new_label = mne.Label(broca_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[6])
#brain1.add_label(FOP5_label_lh, borders=True, color=c_table[6])
#brain1.add_label(FOP4_label_lh, borders=True, color=c_table[6])
#brain1.add_label(FOP3_label_lh, borders=True, color=c_table[6])
#brain1.add_label(FOP2_label_lh, borders=True, color=c_table[6])
#
#brain1.add_label(p47r_label_lh, borders=True, color=c_table[8])
#brain1.add_label(IFSa_label_lh, borders=True, color=c_table[8])
#brain1.add_label(a9_46v_lh, borders=True, color=c_table[8])
temp_frontal_label_l = mne.Label(p47r_label_lh.vertices, hemi='lh',name=u'sts_l',pos=p47r_label_lh.pos,values= p47r_label_lh.values) + \
mne.Label(IFSa_label_lh.vertices, hemi='lh',name=u'sts_l',pos=IFSa_label_lh.pos,values= IFSa_label_lh.values) + \
mne.Label(a9_46v_lh.vertices, hemi='lh',name=u'sts_l',pos=a9_46v_lh.pos,values= a9_46v_lh.values)
brain1.add_label(temp_frontal_label_l, borders=True, color=c_table[8])
lh_label = stc_all_cluster_vis.in_label(temp_frontal_label_l)
data = lh_label.data
lh_label.data[data <= 40] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
frontal_vertices_l = temp.vertices[0]
new_label = mne.Label(frontal_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[8])
#brain1.add_label(OP23_label_lh, borders=True, color='k')
#brain1.add_label(Ig_label_lh, borders=True, color='k')
#brain1.add_label(OP4_label_lh, borders=True, color='k')
#brain1.add_label(OP1_label_lh, borders=True, color='k')
temp_sylvian_label_l = mne.Label(OP23_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP23_label_lh.pos,values= OP23_label_lh.values) + \
mne.Label(Ig_label_lh.vertices, hemi='lh',name=u'sts_l',pos=Ig_label_lh.pos,values= Ig_label_lh.values) + \
mne.Label(OP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP4_label_lh.pos,values= OP4_label_lh.values) + \
mne.Label(OP1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP1_label_lh.pos,values= OP1_label_lh.values) + \
mne.Label(FOP2_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP2_label_lh.pos,values= FOP2_label_lh.values)
brain1.add_label(temp_sylvian_label_l, borders=True, color=c_table[8])
lh_label = stc_all_cluster_vis.in_label(temp_sylvian_label_l)
data = lh_label.data
lh_label.data[data <= 40] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
sylvian_vertices_l = temp.vertices[0]
new_label = mne.Label(sylvian_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[8])
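# The blocks above repeat one recipe: merge HCP labels into a candidate ROI,
# keep only vertices that stay significant longer than a duration cutoff, and
# redraw the result. A helper along these lines (a sketch mirroring the steps
# above, not used by the original cells) captures the pattern:
def cluster_roi_vertices(cluster_stc, roi_label, min_duration=40.):
    """Return lh vertex numbers inside roi_label significant for > min_duration ms."""
    roi_stc = cluster_stc.in_label(roi_label)
    roi_stc.data[roi_stc.data <= min_duration] = 0.
    roi_lh_label, _ = mne.stc_to_label(roi_stc, src='fsaverage', smooth=False,
                                       subjects_dir=fs_dir, connected=False)
    return cluster_stc.in_label(roi_lh_label).vertices[0]
# Example: cluster_roi_vertices(stc_all_cluster_vis, temp_auditory_label_l)
# reproduces aud_vertices_l.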
#%% Right hemisphere
s_space_rh = s_space[s_space[:,0] >= 10242]
connectivity = mne.spatial_tris_connectivity(s_space_rh, remap_vertices = True)
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.spatio_temporal_cluster_1samp_test(data11[:,:,10242:], connectivity=connectivity, n_jobs=18,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
fsave_vertices = [np.array([], int), np.arange(10242)]
stc_all_cluster_vis2 = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
brain2 = stc_all_cluster_vis2.plot(
hemi='rh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=5, clim=dict(kind='value', lims=[10, 100, 300]))
#%%
""" AUD 1 """
M = np.mean(np.mean(X11[aud_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[aud_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(subs))
plt.figure(1)
plt.clf()
plt.subplot(1,2,1)
plt.hold(True)
plt.plot(times, M[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[3],label='High noise')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 3])
plt.title('Lexical task')
plt.subplot(1,2,2)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,3],'-',color=c_table[3],label='High noise')
plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 3])
plt.title('Dot task')
#%%
""" AUD 2 """
M = np.mean(np.mean(X11[aud2_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[aud2_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[aud2_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[aud2_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[aud2_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[aud2_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plotit(times, M, M1, M2, errM, errM1, errM2, yMin=-0, yMax=3)
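# Each ROI cell below rebuilds the same mean / SEM arrays for the whole sample
# and for both reader groups. A helper (a sketch; the cells below keep their
# explicit versions) collects that computation:
def roi_mean_sem(data, vertices, subject_idx):
    """Mean and SEM over subjects of ROI-averaged time courses; data is
    (vertices x times x subjects x conditions), returns (times x conditions)."""
    roi = np.mean(data[vertices][:, :, subject_idx, :], axis=0)
    return np.mean(roi, axis=1), np.std(roi, axis=1) / np.sqrt(len(subject_idx))
# Example: M2, errM2 = roi_mean_sem(X11, aud2_vertices_l, poor_readers)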
#%%
""" Motor """
M = np.mean(np.mean(X11[motor_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[motor_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plotit(times, M, M1, M2, errM, errM1, errM2, yMin=-0, yMax=3)
#%%
""" Broca """
M = np.mean(np.mean(X11[broca_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[broca_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plotit(times, M, M1, M2, errM, errM1, errM2, yMin=-0, yMax=3, title='Broca')
#%%
""" Sylvian """
M = np.mean(np.mean(X11[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plotit(times, M, M1, M2, errM, errM1, errM2, yMin=-0, yMax=3, title='Sylvian')
#%%
""" Frontal """
M = np.mean(np.mean(X11[frontal_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[frontal_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[frontal_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[frontal_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[frontal_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[frontal_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plotit(times, M, M1, M2, errM, errM1, errM2, yMin=-0, yMax=3, title='Frontal')
#%%
""" VTC """
task = 5
plt.figure(2)
plt.clf()
M = np.mean(np.mean(X11[vtc_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[vtc_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[vtc_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[vtc_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[vtc_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[vtc_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plt.subplot(2,3,1)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+1]-errM[:,task+1], M[:,task+1]+errM[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+3]-errM[:,task+3], M[:,task+3]+errM[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Lexical task: VTC')
plt.subplot(2,3,2)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+1]-errM1[:,task+1], M1[:,task+1]+errM1[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+3]-errM1[:,task+3], M1[:,task+3]+errM1[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,3)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+1]-errM2[:,task+1], M2[:,task+1]+errM2[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+3]-errM2[:,task+3], M2[:,task+3]+errM2[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Poor Readers')
task = 0
plt.subplot(2,3,4)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+1]-errM[:,task+1], M[:,task+1]+errM[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+3]-errM[:,task+3], M[:,task+3]+errM[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Dot task: VTC')
plt.subplot(2,3,5)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+1]-errM1[:,task+1], M1[:,task+1]+errM1[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+3]-errM1[:,task+3], M1[:,task+3]+errM1[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,6)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+1]-errM2[:,task+1], M2[:,task+1]+errM2[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+3]-errM2[:,task+3], M2[:,task+3]+errM2[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Poor Readers')
#%%
temp1 = X11[vtc_vertices_l,:,:,:]
s_group = good_readers
for iSub in np.arange(0,len(s_group)):
plt.figure(100+iSub)
plt.clf()
plt.subplot(1,2,1)
plt.hold(True)
plt.plot(times, np.mean(temp1[:,:,s_group[iSub],0],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(temp1[:,:,s_group[iSub],1],axis=0), '-', color=c_table[3])
plt.plot(times, np.mean(temp1[:,:,s_group[iSub],3],axis=0), '-', color=c_table[1])
## plt.plot([0.1, 0.1],[0, 8],'-',color='k')
plt.title(subs[s_group[iSub]])
plt.subplot(1,2,2)
plt.hold(True)
plt.plot(times, np.mean(temp1[:,:,s_group[iSub],5],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(temp1[:,:,s_group[iSub],6],axis=0), '-', color=c_table[3])
plt.plot(times, np.mean(temp1[:,:,s_group[iSub],8],axis=0), '-', color=c_table[1])
# plt.plot([0.1, 0.1],[0, 8],'-',color='k')
plt.title(subs[s_group[iSub]])
#%%
"""
Correlation
"""
X11 = X13[w_vertices,:,:,:]
mask = np.logical_and(times >= 0.22, times <= 0.26)
#dot_task = np.mean(X11[:,:,:,0],axis=0)
dot_task = np.mean(X11[:,mask,:,:],axis=0)
sts_response = np.mean(dot_task[:,:,0],axis=0) - np.mean(dot_task[:,:,3],axis=0)
#sts_response = np.mean(temp[mask,:],axis=0)
#plt.figure(20)
#plt.clf()
#ax = plt.subplot()
#ax.scatter(wid_ss[all_subject], sts_response[all_subject], s=30, c='k', alpha=0.5)
#for i, txt in enumerate(all_subject):
# ax.annotate(subs[txt], (wid_ss[txt], sts_response[txt]))
#
#np.corrcoef(sts_response[all_subject],wid_ss[all_subject])
plt.figure(20)
plt.clf()
ax = plt.subplot()
ax.scatter(twre[all_subject], sts_response[all_subject], s=30, c='k', alpha=0.5)
for i, txt in enumerate(all_subject):
ax.annotate(subs[txt], (twre[txt], sts_response[txt]))
np.corrcoef(sts_response[all_subject],twre[all_subject])
stats.pearsonr(sts_response[all_subject],twre[all_subject])
stats.ttest_ind(sts_response[good_readers],sts_response[poor_readers])
#sns.regplot(
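# The commented sns.regplot call above hints at a regression view of the same
# relationship. A matplotlib-only sketch, assuming (as the cell above appears to)
# that twre refers to the session-2 TOWRE scores, i.e. twre_index2:
twre = twre_index2  # assumption: session-2 TOWRE index
r_val, p_val = stats.pearsonr(sts_response[all_subject], twre[all_subject])
slope, intercept = np.polyfit(twre[all_subject], sts_response[all_subject], 1)
plt.figure(21)
plt.clf()
plt.scatter(twre[all_subject], sts_response[all_subject], s=30, c='k', alpha=0.5)
xfit = np.linspace(twre[all_subject].min(), twre[all_subject].max(), 100)
plt.plot(xfit, slope * xfit + intercept, '-', color=c_table[5])
plt.xlabel('TOWRE index (session 2)')
plt.ylabel('Response difference (condition 0 - 3), 0.22-0.26 s')
plt.title('r = %.2f, p = %.3f' % (r_val, p_val))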
#%%
""" V1 responses """
task = 5
plt.figure(5)
plt.clf()
M = np.mean(np.mean(X11[v1_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[v1_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[v1_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[v1_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[v1_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[v1_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plt.subplot(2,3,1)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+1]-errM[:,task+1], M[:,task+1]+errM[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+3]-errM[:,task+3], M[:,task+3]+errM[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 7])
plt.title('Lexical task: V1')
plt.subplot(2,3,2)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+1]-errM1[:,task+1], M1[:,task+1]+errM1[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+3]-errM1[:,task+3], M1[:,task+3]+errM1[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 7])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,3)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+1]-errM2[:,task+1], M2[:,task+1]+errM2[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+3]-errM2[:,task+3], M2[:,task+3]+errM2[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 7])
plt.title('Poor Readers')
task = 0
plt.subplot(2,3,4)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+1]-errM[:,task+1], M[:,task+1]+errM[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+3]-errM[:,task+3], M[:,task+3]+errM[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 7])
plt.title('Dot task: V1')
plt.subplot(2,3,5)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+1]-errM1[:,task+1], M1[:,task+1]+errM1[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+3]-errM1[:,task+3], M1[:,task+3]+errM1[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 7])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,6)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+1]-errM2[:,task+1], M2[:,task+1]+errM2[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+3]-errM2[:,task+3], M2[:,task+3]+errM2[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 7])
plt.title('Poor Readers')
#%%
plt.figure(6)
plt.clf()
task = 5
plt.subplot(2,3,1)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+2],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+2]-errM[:,task+2], M[:,task+2]+errM[:,task+2], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+4],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+4]-errM[:,task+4], M[:,task+4]+errM[:,task+4], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Lexical task: VTC')
plt.subplot(2,3,2)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+2],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+2]-errM1[:,task+2], M1[:,task+2]+errM1[:,task+2], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+4],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+4]-errM1[:,task+4], M1[:,task+4]+errM1[:,task+4], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,3)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+2],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+2]-errM2[:,task+2], M2[:,task+2]+errM2[:,task+2], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+4],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+4]-errM2[:,task+4], M2[:,task+4]+errM2[:,task+4], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Poor Readers')
task = 0
plt.subplot(2,3,4)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+2],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+2]-errM[:,task+2], M[:,task+2]+errM[:,task+2], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+4],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+4]-errM[:,task+4], M[:,task+4]+errM[:,task+4], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Dot task: VTC')
plt.subplot(2,3,5)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+2],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+2]-errM1[:,task+2], M1[:,task+2]+errM1[:,task+2], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+4],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+4]-errM1[:,task+4], M1[:,task+4]+errM1[:,task+4], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,6)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+2],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+2]-errM2[:,task+2], M2[:,task+2]+errM2[:,task+2], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+4],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+4]-errM2[:,task+4], M2[:,task+4]+errM2[:,task+4], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Poor Readers')
#%%
""" For FLUX """
plt.figure(400)
plt.clf()
cond = 5
X11 = X13[ventral_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.hold(True)
plt.plot(times, M2[:,cond],'-',linewidth=3,color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,cond]-errM2[:,cond], M2[:,cond]+errM2[:,cond], facecolor=c_table[5], alpha=0.2, edgecolor='none')
#plt.plot(times, M[:,6],'-',linewidth=3,color=c_table[3],label='Med noise')
#plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,cond+3],'--',linewidth=3,color=c_table[3],label='High noise')
plt.fill_between(times, M2[:,cond+3]-errM2[:,cond+3], M2[:,cond+3]+errM2[:,cond+3], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-2, 3.5])
plt.xlim([-0.1,0.7])
plt.yticks([-2,-1,0,1,2,3])
#plt.title('Lexical task: VWFA')
plt.savefig('Lexical_ventral_bottomtop_poor.pdf')
#%%
for iSub in np.arange(0,len(poor_readers)):
plt.figure(100+iSub)
plt.clf()
plt.subplot(1,2,1)
plt.hold(True)
plt.plot(times, np.mean(X11[:,:,poor_readers[iSub],0],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(X11[:,:,poor_readers[iSub],1],axis=0), '-', color=c_table[3])
plt.plot(times, np.mean(X11[:,:,poor_readers[iSub],3],axis=0), '-', color=c_table[1])
## plt.plot([0.1, 0.1],[0, 8],'-',color='k')
plt.title(subs[poor_readers[iSub]])
plt.subplot(1,2,2)
plt.hold(True)
plt.plot(times, np.mean(X11[:,:,poor_readers[iSub],5],axis=0), '-', color=c_table[5])
plt.plot(times, np.mean(X11[:,:,poor_readers[iSub],6],axis=0), '-', color=c_table[3])
plt.plot(times, np.mean(X11[:,:,poor_readers[iSub],8],axis=0), '-', color=c_table[1])
# plt.plot([0.1, 0.1],[0, 8],'-',color='k')
plt.title(subs[poor_readers[iSub]])
#%%
""" Broca """
task = 5
plt.figure(3)
plt.clf()
M = np.mean(np.mean(X11[broca_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(X11[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp2[broca_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp2[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp2
plt.subplot(2,3,1)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+1]-errM[:,task+1], M[:,task+1]+errM[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+3]-errM[:,task+3], M[:,task+3]+errM[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Lexical task: Broca')
plt.subplot(2,3,2)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+1]-errM1[:,task+1], M1[:,task+1]+errM1[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+3]-errM1[:,task+3], M1[:,task+3]+errM1[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,3)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+1]-errM2[:,task+1], M2[:,task+1]+errM2[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+3]-errM2[:,task+3], M2[:,task+3]+errM2[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Poor Readers')
task = 0
plt.subplot(2,3,4)
plt.hold(True)
plt.plot(times, M[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,task]-errM[:,task], M[:,task]+errM[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,task+1]-errM[:,task+1], M[:,task+1]+errM[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,task+3]-errM[:,task+3], M[:,task+3]+errM[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Dot task: Broca')
plt.subplot(2,3,5)
plt.hold(True)
plt.plot(times, M1[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,task]-errM1[:,task], M1[:,task]+errM1[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,task+1]-errM1[:,task+1], M1[:,task+1]+errM1[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,task+3]-errM1[:,task+3], M1[:,task+3]+errM1[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Good Readers')
#plt.legend(loc='upper right')
plt.subplot(2,3,6)
plt.hold(True)
plt.plot(times, M2[:,task],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,task]-errM2[:,task], M2[:,task]+errM2[:,task], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,task+1]-errM2[:,task+1], M2[:,task+1]+errM2[:,task+1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,task+3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,task+3]-errM2[:,task+3], M2[:,task+3]+errM2[:,task+3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Poor Readers')
#%%
""" Lexical task: All subjects """
plt.figure(4)
plt.clf()
X11 = np.abs(X13[ventral_vertices,:,:,:])
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,1)
plt.hold(True)
plt.plot(times, M[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Lexical task: VWFA')
plt.subplot(3,3,2)
plt.hold(True)
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Good Readers')
plt.subplot(3,3,3)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 5])
plt.title('Poor Readers')
X11 = X13[pt_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,4)
plt.hold(True)
plt.plot(times, M[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Lexical task: Frontal')
plt.subplot(3,3,5)
plt.hold(True)
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,6)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
X11 = X13[w_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,7)
plt.hold(True)
plt.plot(times, M[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Lexical task: STS')
plt.subplot(3,3,8)
plt.hold(True)
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,9)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
#%%
""" Young vs. old """
plt.figure(3)
plt.clf()
X11 = X13[ventral_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,old_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,young_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,old_readers,:],axis=0),axis=1) / np.sqrt(len(old_readers))
errM2 = np.std(np.mean(X11[:,:,young_readers,:],axis=0),axis=1) / np.sqrt(len(young_readers))
plt.subplot(3,3,1)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,1]-errM[:,1], M[:,1]+errM[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Dot task: VWFA')
plt.subplot(3,3,2)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,3)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
X11 = X13[pt_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,old_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,young_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,old_readers,:],axis=0),axis=1) / np.sqrt(len(old_readers))
errM2 = np.std(np.mean(X11[:,:,young_readers,:],axis=0),axis=1) / np.sqrt(len(young_readers))
plt.subplot(3,3,4)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,1]-errM[:,1], M[:,1]+errM[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Dot task: Frontal')
plt.subplot(3,3,5)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,6)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
X11 = X13[w_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,old_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,young_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,old_readers,:],axis=0),axis=1) / np.sqrt(len(old_readers))
errM2 = np.std(np.mean(X11[:,:,young_readers,:],axis=0),axis=1) / np.sqrt(len(young_readers))
plt.subplot(3,3,7)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,1]-errM[:,1], M[:,1]+errM[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Dot task: STG')
plt.subplot(3,3,8)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,9)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
"""
Correlation
"""
mask = np.logical_and(times >= 0.22, times <= 0.26)
temp = np.mean(X11[:,:,:,0],axis=0)
sts_response = np.mean(temp[mask,:],axis=0)
plt.figure(30)
plt.clf()
plt.hold(True)
ax = plt.subplot()
ax.scatter(twre[old_readers], sts_response[old_readers], s=30, c='r', alpha=1)
for i, txt in enumerate(old_readers):
ax.annotate(age[txt], (twre[txt], sts_response[txt]))
ax.scatter(twre[young_readers], sts_response[young_readers], s=30, c='b', alpha=1)
for i, txt in enumerate(young_readers):
ax.annotate(age[txt], (twre[txt], sts_response[txt]))
np.corrcoef(sts_response[young_readers],twre[young_readers])
# (for r with an accompanying p-value, see the pearsonr sketch below)
np.corrcoef(sts_response[old_readers],twre[old_readers])
np.corrcoef(sts_response[all_subject],twre[all_subject])
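# A minimal sketch (assuming scipy is available) of reporting r together with a
# p-value, rather than the bare correlation matrices above; variable names follow
# this script (sts_response, twre, reader groups).
from scipy.stats import pearsonr

for label, group in [('old', old_readers), ('young', young_readers), ('all', all_subject)]:
    r, p = pearsonr(sts_response[group], twre[group])
    print('%s readers: r = %.2f, p = %.3f' % (label, r, p))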
#%%
""" Dot task: V1 """
axis_font = {'fontname':'Arial', 'size':'16'}
font_prop = font_manager.FontProperties(size=12)
#ax = plt.subplot() # Defines ax variable by creating an empty plot
## Set the tick labels font
#for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# label.set_fontname('Arial')
# label.set_fontsize(13)
#plt.hold(True)
#
#plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
#plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
#
#plt.plot(times, M[:,1],'-',color=c_table[3],label='Med noise')
#plt.fill_between(times, M[:,1]-errM[:,1], M[:,1]+errM[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
#
#plt.plot(times, M[:,3],'-',color=c_table[1],label='High noise')
#plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
#
#plt.grid(b=True)
#plt.ylim([-1, 2])
#plt.legend(loc='upper right', prop=font_prop)
#plt.xlabel('Time after stimulus onset (s)', **axis_font)
#plt.ylabel('dSPM amplitude', **axis_font)
#plt.show()
plt.figure(1)
plt.clf()
X11 = X13[v1_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
plt.subplot(2,3,1)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,1]-errM[:,1], M[:,1]+errM[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 2])
plt.legend()
plt.title('Dot task: V1')
plt.subplot(2,3,4)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='High contrast')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,2],'-',color=c_table[7],label='Low contrast')
plt.fill_between(times, M[:,2]-errM[:,2], M[:,2]+errM[:,2], facecolor=c_table[7], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 2])
plt.legend()
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(2, 3, 2)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 2])
plt.title('Dot task (GR): V1')
plt.subplot(2,3,5)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='High contrast')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,2],'-',color=c_table[7],label='Low contrast')
plt.fill_between(times, M1[:,2]-errM1[:,2], M1[:,2]+errM1[:,2], facecolor=c_table[7], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 2])
plt.subplot(2, 3, 3)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,1],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,3],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 2])
plt.title('Dot task (PR): V1')
plt.subplot(2,3,6)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='High contrast')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,2],'-',color=c_table[7],label='Low contrast')
plt.fill_between(times, M2[:,2]-errM2[:,2], M2[:,2]+errM2[:,2], facecolor=c_table[7], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 2])
""" Plot individual V1 responses """
#for iSub in np.arange(0,len(poor_readers)):
# plt.figure(100+iSub)
# plt.clf()
# plt.subplot(1,2,1)
# plt.hold(True)
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],0],axis=0), '--', color=c_table[5])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],1],axis=0), '--', color=c_table[3])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],3],axis=0), '--', color=c_table[1])
# plt.plot([0.1, 0.1],[0, 8],'-',color='k')
# plt.title(subs[poor_readers[iSub]])
# plt.subplot(1,2,2)
# plt.hold(True)
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],5],axis=0), '-', color=c_table[5])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],6],axis=0), '-', color=c_table[3])
# plt.plot(times, np.mean(X1[v1_vertices,:,poor_readers[iSub],8],axis=0), '-', color=c_table[1])
# plt.plot([0.1, 0.1],[0, 8],'-',color='k')
# plt.title(subs[poor_readers[iSub]])
#%%
""" Low contrast vs. high contrast """
plt.figure(3)
plt.clf()
X11 = X13[ventral_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,1)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,2]-errM[:,2], M[:,2]+errM[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Dot task: VWFA')
plt.subplot(3,3,2)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,2]-errM1[:,2], M1[:,2]+errM1[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,3)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,2]-errM2[:,2], M2[:,2]+errM2[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
X11 = X13[pt_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,4)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,2]-errM[:,2], M[:,2]+errM[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Dot task: Frontal')
plt.subplot(3,3,5)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,2]-errM1[:,2], M1[:,2]+errM1[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,6)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,2]-errM2[:,2], M2[:,2]+errM2[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
X11 = X13[w_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,7)
plt.hold(True)
plt.plot(times, M[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,2]-errM[:,2], M[:,2]+errM[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Dot task: STG')
plt.subplot(3,3,8)
plt.hold(True)
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,2]-errM1[:,2], M1[:,2]+errM1[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,9)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,2],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,2]-errM2[:,2], M2[:,2]+errM2[:,2], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
#%%
""" Task effects in V1"""
plt.figure(5)
plt.clf()
X11 = X13[v1_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,1)
plt.hold(True)
plt.plot(times, M[:,0],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,5],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.legend()
plt.title('V1: Low Noise - all')
plt.subplot(3,3,2)
plt.hold(True)
plt.plot(times, M[:,1],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M[:,1]-errM[:,1], M[:,1]+errM[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,6],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('V1: Med Noise - all')
plt.subplot(3,3,3)
plt.hold(True)
plt.plot(times, M[:,3],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('V1: High Noise - all')
plt.subplot(3,3,4)
plt.hold(True)
plt.plot(times, M1[:,0],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,5],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('Low Noise - good')
plt.subplot(3,3,5)
plt.hold(True)
plt.plot(times, M1[:,1],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('Med Noise - good')
plt.subplot(3,3,6)
plt.hold(True)
plt.plot(times, M1[:,3],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('High Noise - good')
plt.subplot(3,3,7)
plt.hold(True)
plt.plot(times, M2[:,0],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,5],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('Low Noise - poor')
plt.subplot(3,3,8)
plt.hold(True)
plt.plot(times, M2[:,1],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('Med Noise - poor')
plt.subplot(3,3,9)
plt.hold(True)
plt.plot(times, M2[:,3],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2])
plt.title('High Noise - poor')
#%%
""" Task effects in VWFA"""
plt.figure(6)
plt.clf()
X11 = X13[ventral_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,1)
plt.hold(True)
plt.plot(times, M[:,0],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M[:,0]-errM[:,0], M[:,0]+errM[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,5],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
#plt.legend()
plt.title('VWFA: Low Noise - all')
plt.subplot(3,3,2)
plt.hold(True)
plt.plot(times, M[:,1],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M[:,1]-errM[:,1], M[:,1]+errM[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,6],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('VWFA: Med Noise - all')
plt.subplot(3,3,3)
plt.hold(True)
plt.plot(times, M[:,3],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M[:,3]-errM[:,3], M[:,3]+errM[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('VWFA: High Noise - all')
plt.subplot(3,3,4)
plt.hold(True)
plt.plot(times, M1[:,0],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,5],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('Low Noise - good')
plt.subplot(3,3,5)
plt.hold(True)
plt.plot(times, M1[:,1],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('Med Noise - good')
plt.subplot(3,3,6)
plt.hold(True)
plt.plot(times, M1[:,3],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('High Noise - good')
plt.subplot(3,3,7)
plt.hold(True)
plt.plot(times, M2[:,0],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,5],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('Low Noise - poor')
plt.subplot(3,3,8)
plt.hold(True)
plt.plot(times, M2[:,1],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('Med Noise - poor')
plt.subplot(3,3,9)
plt.hold(True)
plt.plot(times, M2[:,3],'--',color=c_table[5],label='Dot')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='Lexical')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.ylim([-1, 2.5])
plt.title('High Noise - poor')
#%%
""" Good vs. Poor in VWFA"""
plt.figure(7)
plt.clf()
X11 = X13[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(2, 3, 1)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('VWFA: Low noise, Dot task')
plt.subplot(2, 3, 2)
plt.hold(True)
plt.plot(times, M2[:,1],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,1],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('VWFA: Med noise, Dot task')
plt.subplot(2, 3, 3)
plt.hold(True)
plt.plot(times, M2[:,3],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,3],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('VWFA: High noise, Dot task')
plt.subplot(2, 3, 4)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('VWFA: Low noise, Lexical task')
plt.subplot(2, 3, 5)
plt.hold(True)
plt.plot(times, M2[:,6],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('VWFA: Med noise, Lexical task')
plt.subplot(2, 3, 6)
plt.hold(True)
plt.plot(times, M2[:,8],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('VWFA: High noise, Lexical task')
#%%
""" Good vs. Poor in STS"""
plt.figure(8)
plt.clf()
X11 = X13[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(2, 3, 1)
plt.hold(True)
plt.plot(times, M2[:,0],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,0]-errM2[:,0], M2[:,0]+errM2[:,0], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,0],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,0]-errM1[:,0], M1[:,0]+errM1[:,0], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('STS: Low noise, Dot task')
plt.subplot(2, 3, 2)
plt.hold(True)
plt.plot(times, M2[:,1],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,1]-errM2[:,1], M2[:,1]+errM2[:,1], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,1],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,1]-errM1[:,1], M1[:,1]+errM1[:,1], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('STS: Med noise, Dot task')
plt.subplot(2, 3, 3)
plt.hold(True)
plt.plot(times, M2[:,3],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,3]-errM2[:,3], M2[:,3]+errM2[:,3], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,3],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,3]-errM1[:,3], M1[:,3]+errM1[:,3], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('STS: High noise, Dot task')
plt.subplot(2, 3, 4)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('STS: Low noise, Lexical task')
plt.subplot(2, 3, 5)
plt.hold(True)
plt.plot(times, M2[:,6],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('STS: Med noise, Lexical task')
plt.subplot(2, 3, 6)
plt.hold(True)
plt.plot(times, M2[:,8],'-',color=c_table[1],label='Poor readers')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[5],label='Good readers')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.legend()
plt.title('STS: High noise, Lexical task')
#%%
""" Task effects """
plt.figure(7)
plt.clf()
X11 = X1[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 1)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Ventral')
plt.subplot(3, 3, 2)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Ventral')
plt.subplot(3, 3, 3)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Ventral')
X11 = X1[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 4)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Frontal')
plt.subplot(3, 3, 5)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Frontal')
plt.subplot(3, 3, 6)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Frontal')
X11 = X1[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 7)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M1[:,[0]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[0]],axis=1)-yerr, np.mean(M1[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M1[:,[5]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[5]],axis=1)-yerr, np.mean(M1[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Temporal')
plt.subplot(3, 3, 8)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M1[:,[1]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[1]],axis=1)-yerr, np.mean(M1[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M1[:,[6]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[6]],axis=1)-yerr, np.mean(M1[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Temporal')
plt.subplot(3, 3, 9)
plt.hold(True)
plt.plot(times, np.mean(M1[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M1[:,[3]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[3]],axis=1)-yerr, np.mean(M1[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M1[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M1[:,[8]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M1[:,[8]],axis=1)-yerr, np.mean(M1[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('GR: Temporal')
#%%
""" Task effects """
plt.figure(8)
plt.clf()
X11 = X1[ventral_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 1)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Ventral')
plt.subplot(3, 3, 2)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Ventral')
plt.subplot(3, 3, 3)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Ventral')
X11 = X1[broca_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 4)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Frontal')
plt.subplot(3, 3, 5)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Frontal')
plt.subplot(3, 3, 6)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Frontal')
X11 = X1[w_vertices,:,:,:]
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
plt.subplot(3, 3, 7)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[0]],axis=1),'--',color=c_table[5])
yerr = np.std(np.mean(M2[:,[0]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[0]],axis=1)-yerr, np.mean(M2[:,[0]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[5]],axis=1),'-',color=c_table[5])
yerr = np.std(np.mean(M2[:,[5]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[5]],axis=1)-yerr, np.mean(M2[:,[5]],axis=1)+yerr, facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Temporal')
plt.subplot(3, 3, 8)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[1]],axis=1),'--',color=c_table[3])
yerr = np.std(np.mean(M2[:,[1]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[1]],axis=1)-yerr, np.mean(M2[:,[1]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[6]],axis=1),'-',color=c_table[3])
yerr = np.std(np.mean(M2[:,[6]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[6]],axis=1)-yerr, np.mean(M2[:,[6]],axis=1)+yerr, facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Temporal')
plt.subplot(3, 3, 9)
plt.hold(True)
plt.plot(times, np.mean(M2[:,[3]],axis=1),'--',color=c_table[1])
yerr = np.std(np.mean(M2[:,[3]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[3]],axis=1)-yerr, np.mean(M2[:,[3]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.plot(times, np.mean(M2[:,[8]],axis=1),'-',color=c_table[1])
yerr = np.std(np.mean(M2[:,[8]],axis=1)) / np.sqrt(M.shape[1])
plt.fill_between(times, np.mean(M2[:,[8]],axis=1)-yerr, np.mean(M2[:,[8]],axis=1)+yerr, facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([0, 4])
plt.title('PR: Temporal')
#%%
plt.subplot(3,3,2)
plt.hold(True)
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,3)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
X11 = X13[broca_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,4)
plt.hold(True)
plt.plot(times, M[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Lexical task: Frontal')
plt.subplot(3,3,5)
plt.hold(True)
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,6)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
X11 = X13[w_vertices,:,:,:]
M = np.mean(np.mean(X11[:,:,all_subject,:],axis=0),axis=1)
M1 = np.mean(np.mean(X11[:,:,good_readers,:],axis=0),axis=1)
M2 = np.mean(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1)
errM = np.std(np.mean(X11[:,:,all_subject,:],axis=0),axis=1) / np.sqrt(len(all_subject))
errM1 = np.std(np.mean(X11[:,:,good_readers,:],axis=0),axis=1) / np.sqrt(len(good_readers))
errM2 = np.std(np.mean(X11[:,:,poor_readers,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
plt.subplot(3,3,7)
plt.hold(True)
plt.plot(times, M[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M[:,5]-errM[:,5], M[:,5]+errM[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M[:,6]-errM[:,6], M[:,6]+errM[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M[:,8]-errM[:,8], M[:,8]+errM[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Lexical task: STS')
plt.subplot(3,3,8)
plt.hold(True)
plt.plot(times, M1[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M1[:,5]-errM1[:,5], M1[:,5]+errM1[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M1[:,6]-errM1[:,6], M1[:,6]+errM1[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M1[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M1[:,8]-errM1[:,8], M1[:,8]+errM1[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Good Readers')
plt.subplot(3,3,9)
plt.hold(True)
plt.plot(times, M2[:,5],'-',color=c_table[5],label='Low noise')
plt.fill_between(times, M2[:,5]-errM2[:,5], M2[:,5]+errM2[:,5], facecolor=c_table[5], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,6],'-',color=c_table[3],label='Med noise')
plt.fill_between(times, M2[:,6]-errM2[:,6], M2[:,6]+errM2[:,6], facecolor=c_table[3], alpha=0.2, edgecolor='none')
plt.plot(times, M2[:,8],'-',color=c_table[1],label='High noise')
plt.fill_between(times, M2[:,8]-errM2[:,8], M2[:,8]+errM2[:,8], facecolor=c_table[1], alpha=0.2, edgecolor='none')
plt.grid(b=True)
plt.ylim([-1, 3])
plt.title('Poor Readers')
| bsd-3-clause |
gifford-lab/bcbio-nextgen | bcbio/structural/validate.py | 3 | 9507 | """Provide validation of structural variations against truth sets.
Tests overlaps of the combined ensemble structural variant BED against
a set of known regions. Requires any overlap between ensemble set and
known regions, and removes regions from analysis that overlap with
exclusion regions.
"""
import csv
import os
import toolz as tz
import numpy as np
import pandas as pd
import pybedtools
try:
import matplotlib as mpl
mpl.use('Agg', force=True)
import matplotlib.pyplot as plt
import seaborn as sns
except ImportError:
mpl, plt, sns = None, None, None
from bcbio.log import logger
from bcbio import utils
from bcbio.pipeline import datadict as dd
from bcbio.structural import convert
from bcbio.distributed.transaction import file_transaction
EVENT_SIZES = [(100, 450), (450, 2000), (2000, 4000), (4000, 20000), (20000, 60000),
(60000, int(1e6))]
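# EVENT_SIZES above lists the SV size bins (in bp) used to stratify sensitivity
# and precision by event length.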
def _stat_str(x, n):
if n > 0:
val = float(x) / float(n) * 100.0
return {"label": "%.1f%% (%s / %s)" % (val, x, n), "val": val}
else:
return {"label": "", "val": 0}
def cnv_to_event(name, data):
"""Convert a CNV to an event name.
"""
ploidy = dd.get_ploidy(data)
if name.startswith("cnv"):
num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")])
if num < ploidy:
return "DEL"
elif num > ploidy:
return "DUP"
else:
return name
else:
return name
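# Illustrative mapping (added comment; "cnvkit" is just an example caller
# suffix and a diploid sample, ploidy 2, is assumed):
#   cnv_to_event("cnv1_cnvkit", data) -> "DEL"  (1 copy   < ploidy)
#   cnv_to_event("cnv3_cnvkit", data) -> "DUP"  (3 copies > ploidy)
#   cnv_to_event("cnv2_cnvkit", data) -> "cnv2_cnvkit"  (copy-neutral, unchanged)
#   cnv_to_event("DEL_lumpy", data)   -> "DEL_lumpy"    (non-CNV names pass through)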
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data):
"""Compare a ensemble results for a caller against a specific caller and SV type.
"""
def cnv_matches(name):
return cnv_to_event(name, data) == svtype
def is_breakend(name):
return name.startswith("BND")
def in_size_range(max_buffer=0):
def _work(feat):
minf, maxf = size_range
buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))
size = feat.end - feat.start
return size >= max([0, minf - buffer]) and size < maxf + buffer
return _work
def is_caller_svtype(feat):
for name in feat.name.split(","):
if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name))
and (caller == "sv-ensemble" or name.endswith(caller))):
return True
return False
minf, maxf = size_range
efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge()
tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas()
etotal = efeats.count()
ttotal = tfeats.count()
match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count()
return {"sensitivity": _stat_str(match, ttotal),
"precision": _stat_str(match, etotal)}
def _evaluate_multi(calls, truth_svtypes, work_dir, data):
base = os.path.join(work_dir, "%s-sv-validate" % (dd.get_sample_name(data)))
out_file = base + ".csv"
df_file = base + "-df.csv"
if any((not utils.file_uptodate(out_file, x["vrn_file"])
or not utils.file_uptodate(df_file, x["vrn_file"])) for x in calls):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with open(df_file, "w") as df_out_handle:
writer = csv.writer(out_handle)
dfwriter = csv.writer(df_out_handle)
writer.writerow(["svtype", "size", "caller", "sensitivity", "precision"])
dfwriter.writerow(["svtype", "size", "caller", "metric", "value", "label"])
for svtype, truth in truth_svtypes.items():
for size in EVENT_SIZES:
str_size = "%s-%s" % size
for call in calls:
call_bed = convert.to_bed(call, dd.get_sample_name(data), work_dir, calls, data)
if utils.file_exists(call_bed):
evalout = _evaluate_one(call["variantcaller"], svtype, size, call_bed,
truth, data)
writer.writerow([svtype, str_size, call["variantcaller"],
evalout["sensitivity"]["label"], evalout["precision"]["label"]])
for metric in ["sensitivity", "precision"]:
dfwriter.writerow([svtype, str_size, call["variantcaller"], metric,
evalout[metric]["val"], evalout[metric]["label"]])
return out_file, df_file
def _plot_evaluation(df_csv):
if mpl is None or plt is None or sns is None:
not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None])
logger.info("No validation plot. Missing imports: %s" % not_found)
return None
df = pd.read_csv(df_csv).fillna("0%")
out = {}
for event in df["svtype"].unique():
out[event] = _plot_evaluation_event(df_csv, event)
return out
def _plot_evaluation_event(df_csv, svtype):
"""Provide plot of evaluation metrics for an SV event, stratified by event size.
"""
titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications",
"INS": "Insertions"}
out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype)
sns.set(style='white')
if not utils.file_uptodate(out_file, df_csv):
metrics = ["sensitivity", "precision"]
df = pd.read_csv(df_csv).fillna("0%")
df = df[(df["svtype"] == svtype)]
event_sizes = _find_events_to_include(df, EVENT_SIZES)
fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True)
if len(event_sizes) == 1:
axs = [axs]
callers = sorted(df["caller"].unique())
if "sv-ensemble" in callers:
callers.remove("sv-ensemble")
callers.append("sv-ensemble")
for i, size in enumerate(event_sizes):
size_label = "%s to %sbp" % size
size = "%s-%s" % size
for j, metric in enumerate(metrics):
ax = axs[i][j]
ax.get_xaxis().set_ticks([])
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlim(0, 125.0)
if i == 0:
ax.set_title(metric, size=12, y=1.2)
vals, labels = _get_plot_val_labels(df, size, metric, callers)
ax.barh(np.arange(len(vals)), vals)
if j == 0:
ax.tick_params(axis='y', which='major', labelsize=8)
ax.locator_params(nbins=len(callers) + 2, axis="y", tight=True)
ax.set_yticklabels(callers, va="bottom")
ax.text(100, len(callers), size_label, fontsize=10)
else:
ax.get_yaxis().set_ticks([])
for ai, (val, label) in enumerate(zip(vals, labels)):
ax.annotate(label, (val + 0.75, ai + 0.35), va='center', size=7)
if svtype in titles:
fig.text(0.025, 0.95, titles[svtype], size=14)
fig.set_size_inches(7, len(event_sizes) + 1)
fig.savefig(out_file)
return out_file
def _find_events_to_include(df, event_sizes):
out = []
for size in event_sizes:
str_size = "%s-%s" % size
curdf = df[(df["size"] == str_size) & (df["metric"] == "sensitivity")]
for val in list(curdf["label"]):
if val != "0%":
out.append(size)
break
return out
def _get_plot_val_labels(df, size, metric, callers):
curdf = df[(df["size"] == size) & (df["metric"] == metric)]
vals, labels = [], []
for caller in callers:
row = curdf[curdf["caller"] == caller]
val = list(row["value"])[0]
if val == 0:
val = 0.1
vals.append(val)
labels.append(list(row["label"])[0])
return vals, labels
def evaluate(data):
"""Provide evaluations for multiple callers split by structural variant type.
"""
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "validate"))
truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data)
if truth_sets and data.get("sv"):
val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data)
summary_plots = _plot_evaluation(df_csv)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv}
return data
if __name__ == "__main__":
#_, df_csv = _evaluate_multi(["lumpy", "delly", "wham", "sv-ensemble"],
# {"DEL": "synthetic_challenge_set3_tumor_20pctmasked_truth_sv_DEL.bed"},
# "syn3-tumor-ensemble-filter.bed", "sv_exclude.bed")
#_, df_csv = _evaluate_multi(["lumpy", "delly", "cn_mops", "sv-ensemble"],
# {"DEL": "NA12878.50X.ldgp.molpb_val.20140508.bed"},
# "NA12878-ensemble.bed", "LCR.bed.gz")
import sys
_plot_evaluation(sys.argv[1])
| mit |
jaeilepp/mne-python | examples/decoding/plot_decoding_csp_timefreq.py | 1 | 6383 | """
============================================================================
Decoding in time-frequency space data using the Common Spatial Pattern (CSP)
============================================================================
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
"""
# Authors: Laura Gwilliams <[email protected]>
# Jean-Remi King <[email protected]>
# Alex Barachant <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, find_events, create_info
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
###############################################################################
# Set parameters and read data
event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet
subject = 1
runs = [6, 10, 14]
raw_fnames = eegbci.load_data(subject, runs)
raw_files = [read_raw_edf(f, stim_channel='auto', preload=True)
for f in raw_fnames]
raw = concatenate_raws(raw_files)
# Extract information from the raw file
sfreq = raw.info['sfreq']
events = find_events(raw, shortest_event=0, stim_channel='STI 014')
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
# Assemble the classifier using scikit-learn pipeline
clf = make_pipeline(CSP(n_components=4, reg=None, log=True),
LinearDiscriminantAnalysis())
n_splits = 5 # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
# Classification & Time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10. # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8 # how many frequency bins to use
# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs) # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples
# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)
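# Quick arithmetic check of the window layout above (comment added for
# clarity, not part of the original example): with n_cycles = 10 and
# max(freqs) = 25 Hz, window_spacing = 10. / 25. / 2. = 0.2 s, so
# centered_w_times spans roughly 0.0, 0.2, ..., 1.8 s and n_windows == 10
# (up to floating-point rounding in np.arange).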
# Instantiate label encoder
le = LabelEncoder()
###############################################################################
# Loop through frequencies, apply classifier and save scores
# init scores
freq_scores = np.zeros((n_freqs - 1,))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
X = epochs.get_data()
# Save mean scores over folds for each frequency and time window
freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot frequency results
plt.bar(left=freqs[:-1], height=freq_scores, width=np.diff(freqs)[0],
align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
###############################################################################
# Loop through frequencies and time, apply classifier and save scores
# init scores
tf_scores = np.zeros((n_freqs - 1, n_windows))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
# Roll covariance, csp and lda over time
for t, w_time in enumerate(centered_w_times):
# Center the min and max of the window
w_tmin = w_time - w_size / 2.
w_tmax = w_time + w_size / 2.
# Crop data into time-window of interest
X = epochs.copy().crop(w_tmin, w_tmax).get_data()
# Save mean scores over folds for each frequency and time window
tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot time-frequency results
# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
centered_w_times, freqs[1:], 1)
chance = np.mean(y) # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
cmap=plt.cm.Reds)
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/io/formats/excel.py | 3 | 24272 | """Utilities for conversion to writer-agnostic Excel representation
"""
import re
import warnings
import itertools
import numpy as np
from pandas.compat import reduce
from pandas.io.formats.css import CSSResolver, CSSWarning
from pandas.io.formats.printing import pprint_thing
import pandas.core.common as com
from pandas.core.dtypes.common import is_float, is_scalar
from pandas.core.dtypes import missing
from pandas import Index, MultiIndex, PeriodIndex
from pandas.io.formats.format import get_level_lengths
class ExcelCell(object):
__fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
__slots__ = __fields__
def __init__(self, row, col, val, style=None, mergestart=None,
mergeend=None):
self.row = row
self.col = col
self.val = val
self.style = style
self.mergestart = mergestart
self.mergeend = mergeend
header_style = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center",
"vertical": "top"}}
class CSSToExcelConverter(object):
"""A callable for converting CSS declarations to ExcelWriter styles
Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
focusing on font styling, backgrounds, borders and alignment.
Operates by first computing CSS styles in a fairly generic
way (see :meth:`compute_css`) then determining Excel style
properties from CSS properties (see :meth:`build_xlstyle`).
Parameters
----------
inherited : str, optional
CSS declarations understood to be the containing scope for the
CSS processed by :meth:`__call__`.
"""
# NB: Most of the methods here could be classmethods, as only __init__
# and __call__ make use of instance attributes. We leave them as
# instancemethods so that users can easily experiment with extensions
# without monkey-patching.
def __init__(self, inherited=None):
if inherited is not None:
inherited = self.compute_css(inherited,
self.compute_css.INITIAL_STYLE)
self.inherited = inherited
compute_css = CSSResolver()
def __call__(self, declarations_str):
"""Convert CSS declarations to ExcelWriter style
Parameters
----------
        declarations_str : str
            CSS declarations as a single string,
            e.g. "font-weight: bold; background: blue"
Returns
-------
xlstyle : dict
A style as interpreted by ExcelWriter when found in
ExcelCell.style.
"""
# TODO: memoize?
properties = self.compute_css(declarations_str, self.inherited)
return self.build_xlstyle(properties)
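    # Rough usage sketch (added for illustration; the full return value also
    # carries defaults inherited from the CSS resolver, so only the keys of
    # interest are shown here):
    #
    #     >>> converter = CSSToExcelConverter()
    #     >>> style = converter("font-weight: bold; text-align: center")
    #     >>> style["font"]["bold"], style["alignment"]["horizontal"]
    #     (True, 'center')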
def build_xlstyle(self, props):
out = {
'alignment': self.build_alignment(props),
'border': self.build_border(props),
'fill': self.build_fill(props),
'font': self.build_font(props),
}
# TODO: support number format
# TODO: handle cell width and height: needs support in pandas.io.excel
def remove_none(d):
"""Remove key where value is None, through nested dicts"""
for k, v in list(d.items()):
if v is None:
del d[k]
elif isinstance(v, dict):
remove_none(v)
if not v:
del d[k]
remove_none(out)
return out
VERTICAL_MAP = {
'top': 'top',
'text-top': 'top',
'middle': 'center',
'baseline': 'bottom',
'bottom': 'bottom',
'text-bottom': 'bottom',
# OpenXML also has 'justify', 'distributed'
}
def build_alignment(self, props):
# TODO: text-indent, padding-left -> alignment.indent
return {'horizontal': props.get('text-align'),
'vertical': self.VERTICAL_MAP.get(props.get('vertical-align')),
'wrap_text': (None if props.get('white-space') is None else
props['white-space'] not in
('nowrap', 'pre', 'pre-line'))
}
def build_border(self, props):
return {side: {
'style': self._border_style(props.get('border-{side}-style'
.format(side=side)),
props.get('border-{side}-width'
.format(side=side))),
'color': self.color_to_excel(
props.get('border-{side}-color'.format(side=side))),
} for side in ['top', 'right', 'bottom', 'left']}
def _border_style(self, style, width):
# convert styles and widths to openxml, one of:
# 'dashDot'
# 'dashDotDot'
# 'dashed'
# 'dotted'
# 'double'
# 'hair'
# 'medium'
# 'mediumDashDot'
# 'mediumDashDotDot'
# 'mediumDashed'
# 'slantDashDot'
# 'thick'
# 'thin'
if width is None and style is None:
return None
if style == 'none' or style == 'hidden':
return None
if width is None:
width = '2pt'
width = float(width[:-2])
if width < 1e-5:
return None
elif width < 1.3:
width_name = 'thin'
elif width < 2.8:
width_name = 'medium'
else:
width_name = 'thick'
if style in (None, 'groove', 'ridge', 'inset', 'outset'):
# not handled
style = 'solid'
if style == 'double':
return 'double'
if style == 'solid':
return width_name
if style == 'dotted':
if width_name in ('hair', 'thin'):
return 'dotted'
return 'mediumDashDotDot'
if style == 'dashed':
if width_name in ('hair', 'thin'):
return 'dashed'
return 'mediumDashed'
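    # A few concrete mappings from the width/style rules above (derived by
    # tracing the method, shown for illustration):
    #   _border_style("solid", "1pt")  -> "thin"
    #   _border_style("solid", "3pt")  -> "thick"
    #   _border_style("dashed", "2pt") -> "mediumDashed"
    #   _border_style("dotted", "1pt") -> "dotted"
    #   _border_style("none", "2pt")   -> None   (border suppressed)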
def build_fill(self, props):
# TODO: perhaps allow for special properties
# -excel-pattern-bgcolor and -excel-pattern-type
fill_color = props.get('background-color')
if fill_color not in (None, 'transparent', 'none'):
return {
'fgColor': self.color_to_excel(fill_color),
'patternType': 'solid',
}
BOLD_MAP = {'bold': True, 'bolder': True, '600': True, '700': True,
'800': True, '900': True,
'normal': False, 'lighter': False, '100': False, '200': False,
'300': False, '400': False, '500': False}
ITALIC_MAP = {'normal': False, 'italic': True, 'oblique': True}
def build_font(self, props):
size = props.get('font-size')
if size is not None:
assert size.endswith('pt')
size = float(size[:-2])
font_names_tmp = re.findall(r'''(?x)
(
"(?:[^"]|\\")+"
|
'(?:[^']|\\')+'
|
[^'",]+
)(?=,|\s*$)
''', props.get('font-family', ''))
font_names = []
for name in font_names_tmp:
if name[:1] == '"':
name = name[1:-1].replace('\\"', '"')
elif name[:1] == '\'':
name = name[1:-1].replace('\\\'', '\'')
else:
name = name.strip()
if name:
font_names.append(name)
family = None
for name in font_names:
if name == 'serif':
family = 1 # roman
break
elif name == 'sans-serif':
family = 2 # swiss
break
elif name == 'cursive':
family = 4 # script
break
elif name == 'fantasy':
family = 5 # decorative
break
decoration = props.get('text-decoration')
if decoration is not None:
decoration = decoration.split()
else:
decoration = ()
return {
'name': font_names[0] if font_names else None,
'family': family,
'size': size,
'bold': self.BOLD_MAP.get(props.get('font-weight')),
'italic': self.ITALIC_MAP.get(props.get('font-style')),
'underline': ('single' if
'underline' in decoration
else None),
'strike': ('line-through' in decoration) or None,
'color': self.color_to_excel(props.get('color')),
# shadow if nonzero digit before shadow color
'shadow': (bool(re.search('^[^#(]*[1-9]',
props['text-shadow']))
if 'text-shadow' in props else None),
# 'vertAlign':,
# 'charset': ,
# 'scheme': ,
# 'outline': ,
# 'condense': ,
}
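    # Illustration of the font-family parsing above (added comment, assuming
    # props.get('font-family') == '"Segoe UI", Arial, sans-serif'): font_names
    # becomes ['Segoe UI', 'Arial', 'sans-serif'], so 'name' is 'Segoe UI' and
    # 'family' resolves to 2 (swiss) via the generic sans-serif fallback.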
NAMED_COLORS = {
'maroon': '800000',
'brown': 'A52A2A',
'red': 'FF0000',
'pink': 'FFC0CB',
'orange': 'FFA500',
'yellow': 'FFFF00',
'olive': '808000',
'green': '008000',
'purple': '800080',
'fuchsia': 'FF00FF',
'lime': '00FF00',
'teal': '008080',
'aqua': '00FFFF',
'blue': '0000FF',
'navy': '000080',
'black': '000000',
'gray': '808080',
'grey': '808080',
'silver': 'C0C0C0',
'white': 'FFFFFF',
}
def color_to_excel(self, val):
if val is None:
return None
if val.startswith('#') and len(val) == 7:
return val[1:].upper()
if val.startswith('#') and len(val) == 4:
return (val[1] * 2 + val[2] * 2 + val[3] * 2).upper()
try:
return self.NAMED_COLORS[val]
except KeyError:
warnings.warn('Unhandled color format: {val!r}'.format(val=val),
CSSWarning)
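    # Examples of the color conversion above (added for illustration):
    #   color_to_excel("#FF0000") -> "FF0000"
    #   color_to_excel("#f00")    -> "FF0000"   (shorthand hex is expanded)
    #   color_to_excel("teal")    -> "008080"   (named color lookup)
    #   color_to_excel("hotpink") -> None, with a CSSWarning about the
    #                                unhandled format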
class ExcelFormatter(object):
"""
Class for formatting a DataFrame to a list of ExcelCells,
Parameters
----------
df : DataFrame or Styler
na_rep: na representation
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
output row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
merge_cells : boolean, default False
Format MultiIndex and Hierarchical Rows as merged cells.
inf_rep : string, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
style_converter : callable, optional
This translates Styler styles (CSS) into ExcelWriter styles.
Defaults to ``CSSToExcelConverter()``.
It should have signature css_declarations string -> excel style.
This is only called for body cells.
"""
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
inf_rep='inf', style_converter=None):
self.rowcounter = 0
self.na_rep = na_rep
if hasattr(df, 'render'):
self.styler = df
df = df.data
if style_converter is None:
style_converter = CSSToExcelConverter()
self.style_converter = style_converter
else:
self.styler = None
self.df = df
if cols is not None:
# all missing, raise
if not len(Index(cols) & df.columns):
raise KeyError(
"passes columns are not ALL present dataframe")
# deprecatedin gh-17295
# 1 missing is ok (for now)
if len(Index(cols) & df.columns) != len(cols):
warnings.warn(
"Not all names specified in 'columns' are found; "
"this will raise a KeyError in the future",
FutureWarning)
self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
def _format_value(self, val):
if is_scalar(val) and missing.isna(val):
val = self.na_rep
elif is_float(val):
if missing.isposinf_scalar(val):
val = self.inf_rep
elif missing.isneginf_scalar(val):
val = '-{inf}'.format(inf=self.inf_rep)
elif self.float_format is not None:
val = float(self.float_format % val)
return val
def _format_header_mi(self):
if self.columns.nlevels > 1:
if not self.index:
raise NotImplementedError("Writing to Excel with MultiIndex"
" columns and no index "
"('index'=False) is not yet "
"implemented.")
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
columns = self.columns
level_strs = columns.format(sparsify=self.merge_cells, adjoin=False,
names=False)
level_lengths = get_level_lengths(level_strs)
coloffset = 0
lnum = 0
if self.index and isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0]) - 1
if self.merge_cells:
# Format multi-index as a merged cells.
for lnum in range(len(level_lengths)):
name = columns.names[lnum]
yield ExcelCell(lnum, coloffset, name, header_style)
for lnum, (spans, levels, labels) in enumerate(zip(
level_lengths, columns.levels, columns.labels)):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(lnum, coloffset + i + 1, values[i],
header_style, lnum,
coloffset + i + spans[i])
else:
yield ExcelCell(lnum, coloffset + i + 1, values[i],
header_style)
else:
# Format in legacy format with dots to indicate levels.
for i, values in enumerate(zip(*level_strs)):
v = ".".join(map(pprint_thing, values))
yield ExcelCell(lnum, coloffset + i + 1, v, header_style)
self.rowcounter = lnum
def _format_header_regular(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
coloffset = 0
if self.index:
coloffset = 1
if isinstance(self.df.index, MultiIndex):
coloffset = len(self.df.index[0])
colnames = self.columns
if has_aliases:
if len(self.header) != len(self.columns):
raise ValueError('Writing {cols} cols but got {alias} '
'aliases'.format(cols=len(self.columns),
alias=len(self.header)))
else:
colnames = self.header
for colindex, colname in enumerate(colnames):
yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
header_style)
def _format_header(self):
if isinstance(self.columns, MultiIndex):
gen = self._format_header_mi()
else:
gen = self._format_header_regular()
gen2 = ()
if self.df.index.names:
row = [x if x is not None else ''
for x in self.df.index.names] + [''] * len(self.columns)
if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
for colindex, val in enumerate(row))
self.rowcounter += 1
return itertools.chain(gen, gen2)
def _format_body(self):
if isinstance(self.df.index, MultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
def _format_regular_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
# output index and index_label?
if self.index:
# check aliases
# if list only take first as this is not a MultiIndex
if (self.index_label and
isinstance(self.index_label, (list, tuple, np.ndarray,
Index))):
index_label = self.index_label[0]
# if string good to go
elif self.index_label and isinstance(self.index_label, str):
index_label = self.index_label
else:
index_label = self.df.index.names[0]
if isinstance(self.columns, MultiIndex):
self.rowcounter += 1
if index_label and self.header is not False:
yield ExcelCell(self.rowcounter - 1, 0, index_label,
header_style)
# write index_values
index_values = self.df.index
if isinstance(self.df.index, PeriodIndex):
index_values = self.df.index.to_timestamp()
for idx, idxval in enumerate(index_values):
yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
coloffset = 1
else:
coloffset = 0
for cell in self._generate_body(coloffset):
yield cell
def _format_hierarchical_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
gcolidx = 0
if self.index:
index_labels = self.df.index.names
# check for aliases
if (self.index_label and
isinstance(self.index_label, (list, tuple, np.ndarray,
Index))):
index_labels = self.index_label
# MultiIndex columns require an extra row
# with index names (blank if None) for
            # unambiguous round-trip, unless not merging,
# in which case the names all go on one row Issue #11328
if isinstance(self.columns, MultiIndex) and self.merge_cells:
self.rowcounter += 1
# if index labels are not empty go ahead and dump
if com._any_not_none(*index_labels) and self.header is not False:
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter - 1, cidx, name,
header_style)
if self.merge_cells:
# Format hierarchical rows as merged cells.
level_strs = self.df.index.format(sparsify=True, adjoin=False,
names=False)
level_lengths = get_level_lengths(level_strs)
for spans, levels, labels in zip(level_lengths,
self.df.index.levels,
self.df.index.labels):
values = levels.take(labels,
allow_fill=levels._can_hold_na,
fill_value=True)
for i in spans:
if spans[i] > 1:
yield ExcelCell(self.rowcounter + i, gcolidx,
values[i], header_style,
self.rowcounter + i + spans[i] - 1,
gcolidx)
else:
yield ExcelCell(self.rowcounter + i, gcolidx,
values[i], header_style)
gcolidx += 1
else:
# Format hierarchical rows with non-merged values.
for indexcolvals in zip(*self.df.index):
for idx, indexcolval in enumerate(indexcolvals):
yield ExcelCell(self.rowcounter + idx, gcolidx,
indexcolval, header_style)
gcolidx += 1
for cell in self._generate_body(gcolidx):
yield cell
def _generate_body(self, coloffset):
if self.styler is None:
styles = None
else:
styles = self.styler._compute().ctx
if not styles:
styles = None
xlstyle = None
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
if styles is not None:
xlstyle = self.style_converter(';'.join(styles[i, colidx]))
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val,
xlstyle)
def get_formatted_cells(self):
for cell in itertools.chain(self._format_header(),
self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
def write(self, writer, sheet_name='Sheet1', startrow=0,
startcol=0, freeze_panes=None, engine=None):
"""
writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
startrow :
upper left cell row to dump data frame
startcol :
upper left cell column to dump data frame
freeze_panes : tuple of integer (length 2), default None
Specifies the one-based bottommost row and rightmost column that
is to be frozen
engine : string, default None
write engine to use if writer is a path - you can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
and ``io.excel.xlsm.writer``.
"""
from pandas.io.excel import ExcelWriter
from pandas.io.common import _stringify_path
if isinstance(writer, ExcelWriter):
need_save = False
else:
writer = ExcelWriter(_stringify_path(writer), engine=engine)
need_save = True
formatted_cells = self.get_formatted_cells()
writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol,
freeze_panes=freeze_panes)
if need_save:
writer.save()
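# Minimal usage sketch (added for illustration; the toy frame and the file
# name "example.xlsx" are made up, and an Excel writer engine such as
# openpyxl must be available):
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1.0, 2.5], "b": ["x", "y"]})
#     ExcelFormatter(df, na_rep="-", index=False).write("example.xlsx",
#                                                       sheet_name="Sheet1")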
| mit |
wangmiao1981/spark | python/pyspark/pandas/plot/matplotlib.py | 14 | 30172 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib as mat
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_list_like
from pandas.io.formats.printing import pprint_thing
from pyspark.pandas.plot import (
TopNPlotBase,
SampledPlotBase,
HistogramPlotBase,
BoxPlotBase,
unsupported_function,
KdePlotBase,
)
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
MPLPlot as PandasMPLPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
else:
from pandas.plotting._matplotlib import (
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib.core import MPLPlot as PandasMPLPlot
_all_kinds = PlotAccessor._all_kinds
class PandasOnSparkBarPlot(PandasBarPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
self.set_result_text(ax)
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class PandasOnSparkBoxPlot(PandasBoxPlot, BoxPlotBase):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_ticks=None,
# manage_xticks is for compatibility of matplotlib < 3.1.0.
# Remove this when minimum version is 3.0.0
manage_xticks=None,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
"""Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(prop_dict, mat.rcParams[rc_str.format(rc_name, prop_dict)])
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
            # no-flier case, which should really be done with
            # 'showfliers=False' but nonetheless deal with it to keep
            # backward compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in the user want
# filled symbol, if the users want more control use
# flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
should_manage_ticks = True
if manage_xticks is not None:
should_manage_ticks = manage_xticks
if manage_ticks is not None:
should_manage_ticks = manage_ticks
if LooseVersion(mat.__version__) < LooseVersion("3.1.0"):
extra_args = {"manage_xticks": should_manage_ticks}
else:
extra_args = {"manage_ticks": should_manage_ticks}
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
zorder=zorder,
**extra_args,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
spark_column_name = self.data._internal.spark_column_name_for(self.data._column_label)
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(PandasOnSparkBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is pandas-on-Spark specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# # Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
# # Creates a column to flag rows as outliers or not
outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
# # Computes min and max values of non-outliers - the whiskers
whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
if showfliers:
fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = mat.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = mat.rcParams["boxplot.bootstrap"]
if notch is None:
notch = mat.rcParams["boxplot.notch"]
if vert is None:
vert = mat.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = mat.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = mat.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = mat.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = mat.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = mat.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = mat.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
class PandasOnSparkHistPlot(PandasHistPlot, HistogramPlotBase):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
self.data, self.bins = HistogramPlotBase.prepare_hist_data(self.data, self.bins)
def _make_plot(self):
# TODO: this logic is similar with KdePlot. Might have to deduplicate it.
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
output_series = HistogramPlotBase.compute_hist(self.data, self.bins)
for (i, label), y in zip(enumerate(self.data._internal.column_labels), output_series):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
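    # Illustration of the weights trick above (added comment, toy numbers):
    # with bins = [0, 1, 2, 3] and precomputed counts y = [3, 5, 2],
    # ax.hist(bins[:-1], bins=bins, weights=y) draws bars of height 3, 5 and 2
    # without re-counting the underlying data.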
class PandasOnSparkPiePlot(PandasPiePlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkAreaPlot(PandasAreaPlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkLinePlot(PandasLinePlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkBarhPlot(PandasBarhPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkScatterPlot(PandasScatterPlot, TopNPlotBase):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkKdePlot(PandasKdePlot, KdePlotBase):
def _compute_plot_data(self):
self.data = KdePlotBase.prepare_kde_data(self.data)
def _make_plot(self):
# 'num_colors' requires to calculate `shape` which has to count all.
# Use 1 for now to save the computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
return KdePlotBase.get_ind(y, self.ind)
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
y = KdePlotBase.compute_kde(y, bw_method=bw_method, ind=ind)
lines = PandasMPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
_klasses = [
PandasOnSparkHistPlot,
PandasOnSparkBarPlot,
PandasOnSparkBoxPlot,
PandasOnSparkPiePlot,
PandasOnSparkAreaPlot,
PandasOnSparkLinePlot,
PandasOnSparkBarhPlot,
PandasOnSparkScatterPlot,
PandasOnSparkKdePlot,
]
_plot_klass = {getattr(klass, "_kind"): klass for klass in _klasses}
_common_kinds = {"area", "bar", "barh", "box", "hist", "kde", "line", "pie"}
_series_kinds = _common_kinds.union(set())
_dataframe_kinds = _common_kinds.union({"scatter", "hexbin"})
_pandas_on_spark_all_kinds = _common_kinds.union(_series_kinds).union(_dataframe_kinds)
def plot_pandas_on_spark(data, kind, **kwargs):
if kind not in _pandas_on_spark_all_kinds:
raise ValueError("{} is not a valid plot kind".format(kind))
from pyspark.pandas import DataFrame, Series
if isinstance(data, Series):
if kind not in _series_kinds:
return unsupported_function(class_name="pd.Series", method_name=kind)()
return plot_series(data=data, kind=kind, **kwargs)
elif isinstance(data, DataFrame):
if kind not in _dataframe_kinds:
return unsupported_function(class_name="pd.DataFrame", method_name=kind)()
return plot_frame(data=data, kind=kind, **kwargs)
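# Rough usage sketch (added for illustration; the toy frame is an assumption,
# not part of this module):
#
#     import pyspark.pandas as ps
#     psdf = ps.DataFrame({"x": [1.0, 2.0, 2.0, 3.5], "y": [0.1, 0.4, 0.3, 0.9]})
#     plot_pandas_on_spark(psdf["x"], kind="hist", bins=4)      # Series kind
#     plot_pandas_on_spark(psdf, kind="scatter", x="x", y="y")  # DataFrame-only kind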
def plot_series(
data,
kind="line",
ax=None, # Series unique
figsize=None,
use_index=True,
title=None,
grid=None,
legend=False,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
label=None,
secondary_y=False, # Series unique
**kwds
):
"""
Make plots of Series using matplotlib / pylab.
Each plot kind has a corresponding method on the
``Series.plot`` accessor:
``s.plot(kind='line')`` is equivalent to
``s.plot.line()``.
Parameters
----------
data : Series
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
ax : matplotlib axes object
If not passed, uses gca()
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
# function copied from pandas.plotting._core
# so it calls modified _plot below
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
with plt.rc_context():
ax = plt.gca()
ax = PandasMPLPlot._get_ax_layer(ax)
return _plot(
data,
kind=kind,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
label=label,
secondary_y=secondary_y,
**kwds,
)
def plot_frame(
data,
x=None,
y=None,
kind="line",
ax=None,
subplots=None,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwds
):
"""
Make plots of DataFrames using matplotlib / pylab.
Each plot kind has a corresponding method on the
``DataFrame.plot`` accessor:
``psdf.plot(kind='line')`` is equivalent to
``psdf.plot.line()``.
Parameters
----------
data : DataFrame
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
ax : matplotlib axes object
If not passed, uses gca()
x : label or position, default None
y : label, position or list of label, positions, default None
Allows plotting of one column versus another.
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
sharex: bool or None, default is None
Whether to share x axis or not.
sharey: bool, default is False
Whether to share y axis or not.
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
sort_columns: bool, default is False
When True, will sort values on plots.
**kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
"""
return _plot(
data,
kind=kind,
x=x,
y=y,
ax=ax,
figsize=figsize,
use_index=use_index,
title=title,
grid=grid,
legend=legend,
subplots=subplots,
style=style,
logx=logx,
logy=logy,
loglog=loglog,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
rot=rot,
fontsize=fontsize,
colormap=colormap,
table=table,
yerr=yerr,
xerr=xerr,
sharex=sharex,
sharey=sharey,
secondary_y=secondary_y,
layout=layout,
sort_columns=sort_columns,
**kwds,
)
def _plot(data, x=None, y=None, subplots=False, ax=None, kind="line", **kwds):
from pyspark.pandas import DataFrame
# function copied from pandas.plotting._core
# and adapted to handle pandas-on-Spark DataFrame and Series
kind = kind.lower().strip()
kind = {"density": "kde"}.get(kind, kind)
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
# scatter and hexbin are inherited from PlanePlot which require x and y
if kind in ("scatter", "hexbin"):
plot_obj = klass(data, x, y, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
# check data type and do preprocess before applying plot
if isinstance(data, DataFrame):
if x is not None:
data = data.set_index(x)
# TODO: check if value of y is plottable
if y is not None:
data = data[y]
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
| apache-2.0 |
choderalab/LiquidBenchmark | src/figures/find_worst_offenders.py | 2 | 1827 | import sklearn.metrics, sklearn.cross_validation
import numpy as np
import statsmodels.formula.api as sm
import simtk.unit as u
import polarizability
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set_palette("bright")
sns.set_style("whitegrid")
sns.set(font_scale=1.2)
expt0 = pd.read_csv("./tables/data_dielectric.csv")
expt = pd.read_csv("./tables/data_with_metadata.csv")
expt["temperature"] = expt["Temperature, K"]
pred = pd.read_csv("./tables/predictions.csv")
pred["polcorr"] = pd.Series(dict((cas, polarizability.dielectric_correction_from_formula(formula, density * u.grams / u.milliliter)) for cas, (formula, density) in pred[["formula", "density"]].iterrows()))
pred["corrected_dielectric"] = pred["polcorr"] + pred["dielectric"]
expt = expt.set_index(["cas", "temperature"]) # Can't do this because of duplicates # Should be fixed now, probably due to the CAS / name duplication issue found by Julie.
#expt = expt.groupby(["cas", "temperature"]).mean() # Fix a couple of duplicates, not sure how they got there.
pred = pred.set_index(["cas", "temperature"])
pred["expt_density"] = expt["Mass density, kg/m3"]
pred["expt_dielectric"] = expt["Relative permittivity at zero frequency"]
#pred["expt_density_std"] = expt["Mass density, kg/m3_std"]
pred["expt_density_std"] = expt["Mass density, kg/m3_uncertainty_bestguess"]
#pred["expt_dielectric_std"] = expt["Relative permittivity at zero frequency_std"]
pred["expt_dielectric_std"] = expt["Relative permittivity at zero frequency_uncertainty_bestguess"]
q = abs(pred.density - pred.expt_density)
q.sort()
cas = q.reset_index()[-20:].cas.unique()
expt0[expt0.cas.isin(cas)].components.unique()
pred.density.mean()
pred.density.std()
pred.density.std() / np.sqrt(len(pred))
pred.expt_density.mean()
pred.expt_density.std() / np.sqrt(len(pred))
| gpl-2.0 |
eickenberg/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 30 | 1812 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
"""
Test BayesianRidge on diabetes
"""
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
"""
Test BayesianRidge on toy
"""
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
"""
Test BayesianRegression ARD classifier
"""
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
bsmurphy/PyKrige | tests/test_api.py | 1 | 1420 | from itertools import product
import numpy as np
import pytest
from pykrige.compat import Krige
from pykrige.compat import threed_krige
def _method_and_variogram():
method = ["ordinary", "universal", "ordinary3d", "universal3d"]
variogram_model = ["linear", "power", "gaussian", "spherical", "exponential"]
return product(method, variogram_model)
def test_krige():
# dummy data
pytest.importorskip("sklearn")
from sklearn.model_selection import GridSearchCV
np.random.seed(1)
X = np.random.randint(0, 400, size=(20, 3)).astype(float)
y = 5 * np.random.rand(20)
    for m, v in _method_and_variogram():
param_dict = {"method": [m], "variogram_model": [v]}
estimator = GridSearchCV(
Krige(),
param_dict,
n_jobs=-1,
pre_dispatch="2*n_jobs",
verbose=False,
return_train_score=True,
cv=5,
)
# run the gridsearch
if m in ["ordinary", "universal"]:
estimator.fit(X=X[:, :2], y=y)
else:
estimator.fit(X=X, y=y)
if hasattr(estimator, "best_score_"):
if m in threed_krige:
assert estimator.best_score_ > -10.0
else:
assert estimator.best_score_ > -3.0
if hasattr(estimator, "cv_results_"):
assert estimator.cv_results_["mean_train_score"] > 0
| bsd-3-clause |
vshtanko/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
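# A minimal sketch of the two forms of evaluation described in the module
# docstring, assuming ``labels_true`` holds known ground-truth classes and
# ``labels_pred`` / ``X`` come from a fitted clustering model:
#
#     from sklearn.metrics.cluster import adjusted_rand_score, silhouette_score
#
#     adjusted_rand_score(labels_true, labels_pred)  # supervised: needs ground truth
#     silhouette_score(X, labels_pred)               # unsupervised: data and labels only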
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
fnguyen01/xsmurf | utils/compute_2dfBm_divfree.py | 2 | 11637 | #!/usr/bin/env python
"""
Generate 2d fractional Brownian vector fields (fBm) as two numpy arrays.
Provide a routine to save in either matlab or xsmurf file format.
Use Hodge-Helmholtz decomposition to obtain the divergence free part.
"""
__author__ = "Pierre Kestener"
__license__ = "GPL"
# numerical packages
import numpy as np
# Fast Fourier Transform
from scipy.fftpack import fft, ifft, fftn, ifftn
def genFbm2d_scalar(nx,ny,h=0.5):
"""
Generate 2D fBm scalar field (assume nx and ny are even)
h : Holder exponent in [0, 1]
if h == 0.5 : regular Brownian motion
if h != 0.5 : fractional Brownian motion
"""
# initialize Fourier coef
fftCoeff = np.zeros((nx,ny)).astype(complex)
print fftCoeff.flags
print fftCoeff.shape
# fill half Fourier, and the other half with complex conjugate
for i in range(nx):
# compute kx
kx = i
if i>nx/2:
kx = i - nx
# compute i2 (central symmetry)
if (i==0):
i2=0
elif (i==nx/2):
i2=nx/2
else:
i2=nx-i
for j in range(ny/2+1):
# compute ky
ky = j
# compute j2 (central symmetry)
if (j==0):
j2=0
elif (j==ny/2):
j2=ny/2
else:
j2=ny-j
kSquare = 1.0*(kx**2+ky**2)
if kSquare>0:
radius = np.power(kSquare, -(2*h+2)/4) * np.random.normal()
phase = 2 * np.pi * np.random.uniform()
else:
radius = 1.0
phase = 0.0
# fill fourier coefficient so that ifft is real (imag = 0)
fftCoeff[i ,j ] = radius*np.cos(phase) + 1j*radius*np.sin(phase)
fftCoeff[i2,j2] = radius*np.cos(phase) - 1j*radius*np.sin(phase)
# make sure that Fourier coef at i=0, j=ny/2 is real
fftCoeff[0,ny/2] = np.real(fftCoeff[0,ny/2]) + 1j*0
# make sure that Fourier coef at i=nx/2, j=0 is real
fftCoeff[nx/2,0] = np.real(fftCoeff[nx/2,0]) + 1j*0
# make sure that Fourier coef at i=nx/2, j=ny/2 is real
fftCoeff[nx/2,ny/2] = np.real(fftCoeff[nx/2,ny/2]) + 1j*0
return ifftn(fftCoeff)
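# Note on the exponent used above: for a d-dimensional fBm with Hurst exponent
# h, the usual spectral-synthesis recipe draws Fourier amplitudes proportional
# to sqrt(S(k)) with power spectrum S(k) ~ k**(-(2*h + d)). With d = 2 and
# kSquare = k**2 this gives kSquare**(-(2*h + 2)/4), the factor applied to the
# Gaussian draw above; genFbm3d_scalar below uses the d = 3 analogue,
# kSquare**(-(2*h + 3)/4).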
def genFbm3d_scalar(nx,ny,nz,h=0.5):
"""
Create 3D fBm scalar field (assume nx, ny, nz are even)
h : Holder exponent in [0, 1]
if h == 0.5 : regular Brownian motion
if h != 0.5 : fractional Brownian motion
"""
# initialize Fourier coef
fftCoeff = np.zeros((nx,ny,nz)).astype(complex)
    # fill half Fourier space, and the other half using complex conjugate
for i in range(nx):
# compute kx
kx = i
if i>nx/2:
kx = i - nx
# compute i2
if (i==0):
i2=0
elif (i==nx/2):
i2=nx/2
else:
i2=nx-i
for j in range(ny):
# compute ky
ky = j
if j>ny/2:
ky = j - ny
# compute j2
if (j==0):
j2=0
elif (j==ny/2):
j2=ny/2
else:
j2=ny-j
for k in range(nz/2+1):
# compute kz
kz = k
# compute k2
if (k==0):
k2=0
elif (k==nz/2):
k2=nz/2
else:
k2=nz-k
kSquare = 1.0*(kx**2+ky**2+kz**2)
if kSquare>0:
radius = np.power(kSquare, -(2*h+3)/4) * np.random.normal()
phase = 2 * np.pi * np.random.uniform()
else:
radius = 1.0
phase = 0.0
# fill Fourier coef so that ifft is real
fftCoeff[i ,j ,k ] = radius*np.cos(phase) + 1j*radius*np.sin(phase)
fftCoeff[i2,j2,k2] = radius*np.cos(phase) - 1j*radius*np.sin(phase)
# enforce symmetries for a real valued field
# make sure that Fourier coef at i=nx/2 ... is real
fftCoeff[nx/2,0 ,0 ] = np.real(fftCoeff[nx/2,0 ,0 ]) + 1j*0
fftCoeff[0 ,ny/2,0 ] = np.real(fftCoeff[0 ,ny/2,0 ]) + 1j*0
fftCoeff[0 ,0 ,nz/2] = np.real(fftCoeff[0 ,0 ,nz/2]) + 1j*0
fftCoeff[nx/2,ny/2,0 ] = np.real(fftCoeff[nx/2,ny/2,0 ]) + 1j*0
fftCoeff[nx/2,0 ,nz/2] = np.real(fftCoeff[nx/2,0 ,nz/2]) + 1j*0
fftCoeff[0 ,ny/2,nz/2] = np.real(fftCoeff[0 ,ny/2,nz/2]) + 1j*0
fftCoeff[nx/2,ny/2,nz/2] = np.real(fftCoeff[nx/2,ny/2,nz/2]) + 1j*0
return ifftn(fftCoeff)
def compute_div_free_2d(vx,vy):
"""
Compute Hodge-Helmholtz decomposition of the vector field (vx,vy) using a Fourier method.
Hodge-Helmholtz theorem is
V = -nabla(phi) + nabla ^ A
So here, we compute W=nabla^A, using the Leray projector W=PV
    with P defined by P_kj = \delta_kj - (ksi_k ksi_j) / |ksi|^2
return (wx,wy) : the div-free velocity projection
"""
# retrieve array sizes
nx,ny = vx.shape
# compute forward FFT
vx_fft = fftn(vx)
vy_fft = fftn(vy)
# initialize to zero the div free velocity field
wx_fft = np.zeros((nx,ny)).astype(complex)
wy_fft = np.zeros((nx,ny)).astype(complex)
# fill half Fourier, and the other half with complex conjugate
for i in range(nx):
# compute kx
kx = i
if i>nx/2:
kx = i - nx
# compute i2
if (i==0):
i2=0
elif (i==nx/2):
i2=nx/2
else:
i2=nx-i
for j in range(ny/2+1):
# compute ky
ky = j
# compute j2
if (j==0):
j2=0
elif (j==ny/2):
j2=ny/2
else:
j2=ny-j
kSquare = 1.0*(kx**2+ky**2)
# don't do anything about the continuous component
if kx==0 and ky==0:
kSquare = 1.0
# Leray tensor:
m_00 = ky**2/kSquare
m_01 = -(kx*ky)/kSquare
m_10 = -(kx*ky)/kSquare
m_11 = kx**2/kSquare
# fill fourier coefficient so that inverse FFT is real
tmp0 = m_00 * vx_fft[i,j] + m_01 * vy_fft[i,j]
tmp1 = m_10 * vx_fft[i,j] + m_11 * vy_fft[i,j]
wx_fft[i, j ] = tmp0
wx_fft[i2, j2] = np.conj(tmp0)
wy_fft[i, j ] = tmp1
wy_fft[i2, j2] = np.conj(tmp1)
# make sure that Fourier coef at i=0, j=ny/2 is real
wx_fft[0,ny/2] = np.real(wx_fft[0,ny/2]) + 1j*0
wy_fft[0,ny/2] = np.real(wy_fft[0,ny/2]) + 1j*0
# make sure that Fourier coef at i=nx/2, j=0 is real
wx_fft[nx/2,0] = np.real(wx_fft[nx/2,0]) + 1j*0
wy_fft[nx/2,0] = np.real(wy_fft[nx/2,0]) + 1j*0
# make sure that Fourier coef at i=nx/2, j=ny/2 is real
wx_fft[nx/2,ny/2] = np.real(wx_fft[nx/2,ny/2]) + 1j*0
wy_fft[nx/2,ny/2] = np.real(wy_fft[nx/2,ny/2]) + 1j*0
# return the result div free velocity field
return ifftn(wx_fft),ifftn(wy_fft)
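# Illustrative helper (not called by this script): the single-mode form of the
# Leray projection that compute_div_free_2d applies mode by mode above.
def leray_projector_2d(kx, ky):
    """
    Return the 2x2 Leray projector P = I - k k^T / |k|^2 for one Fourier mode
    k = (kx, ky). P annihilates the gradient direction (kx, ky) and leaves the
    rotational direction (-ky, kx) unchanged, which is exactly the action of
    the entries m_00 .. m_11 used in compute_div_free_2d.
    """
    k2 = float(kx**2 + ky**2)
    return np.array([[ky**2, -kx*ky],
                     [-kx*ky, kx**2]]) / k2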
def saveMatlab(filename_prefix,data):
"""
Save data array in a file using Matlab file format.
filename_prefix (no .mat)
"""
# import routine for matlab file format
import scipy.io as sio
# save data
sio.savemat(filename_prefix+'.mat', {filename_prefix:data})
def saveXsm(filename_prefix,data):
"""
Save numpy 2d data array using xsmurf file format.
Suffix .xsm will be added.
"""
filename = filename_prefix + '.xsm'
# get data shape
nx,ny = data.shape
# open file
f = open(filename, 'w')
# write the one line xsmurf header
f.write("Binary 1 {0}x{1} {2}(4 byte reals)\n".format(nx,ny,nx*ny))
# write heavy data
data.astype(np.float32).tofile(f)
# close file
f.close()
def demo_plot(vx, vy, wx, wy):
import matplotlib.pyplot as plt
#Y,X = np.mgrid[0:nx, 0:ny]
x = np.linspace(0,nx-1,nx)
y = np.linspace(0,ny-1,ny)
X,Y = np.meshgrid(x, y, indexing='ij')
# for colormap
v = np.sqrt(vx**2+vy**2)
w = np.sqrt(wx**2+wy**2)
# plot vx,vy
plt.subplot(231)
plt.imshow(vx)
plt.title('vx')
plt.subplot(232)
plt.imshow(vy)
plt.title('vy')
plt.subplot(233)
#plt.streamplot(X,Y,vx,vy,color=v,density=1.5)
plt.streamplot(Y,X,vy,vx,color=v,density=1.5)
plt.colorbar()
plt.title('(vx,vy)')
# plot wx,wy
plt.subplot(234)
plt.imshow(wx)
plt.title('wx')
plt.subplot(235)
plt.imshow(wy)
plt.title('wy')
plt.subplot(236)
#plt.streamplot(X,Y,wx,wy,color=w,density=1.5)
plt.streamplot(Y,X,wy,wx,color=w,density=1.5)
plt.colorbar()
plt.title('(wx,wy)')
plt.show()
def check_divergence(vx, vy, wx, wy):
"""
Compute divergence of vector fields (vx,vy) and (wx,wy)
and plot.
"""
# compute divergence of (vx,vy)
dxx,dxy=np.gradient(vx)
dyx,dyy=np.gradient(vy)
div_v = dxx+dyy
# check that wx,wy is really divergence-free
dxx,dxy=np.gradient(wx)
dyx,dyy=np.gradient(wy)
div_w = dxx+dyy
L2_div_v = np.sum(div_v**2)**(0.5)
L2_div_w = np.sum(div_w**2)**(0.5)
    print("L2 norm of div(v) {}".format(L2_div_v))
    print("L2 norm of div(w) {}".format(L2_div_w))
# dxx+dyy should zero everywhere
print("compute divergence")
import matplotlib.pyplot as plt
plt.subplot(211)
plt.imshow(div_v)
plt.colorbar()
plt.title('divergence(v)')
plt.subplot(212)
plt.imshow(div_w)
plt.colorbar()
plt.title('divergence(w)')
plt.show()
#
# main
#
if __name__ == '__main__':
# parse command line
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
default="fBm",
help="write output files with given prefix", metavar="FILE")
parser.add_option("-i", "--fileId", dest="fileId",
default=1,
help="output filename suffix", type="int")
parser.add_option("-s", "--size", dest="size",
default=64,
help="linear size of 3D data", type="int")
parser.add_option("-H", "--hurst", dest="H",
default=0.5,
                      help="Hurst exponent of the fractional Brownian motion", type="float")
(options, args) = parser.parse_args()
#print options.filename
print 'Program run with args:\nfilename prefix={0}\nsize={1}\nH={2}\nfileId={3}'.format(options.filename, options.size, options.H,options.fileId)
size = options.size
nx = options.size
ny = options.size
H = options.H
fileId = options.fileId
# 2d test
vx = np.real(genFbm2d_scalar(nx,ny))
vy = np.real(genFbm2d_scalar(nx,ny))
v = np.sqrt(vx**2+vy**2)
# compute divergence free
wx,wy = compute_div_free_2d(vx,vy)
wx = np.real(wx)
wy = np.real(wy)
#w = np.sqrt(wx**2+wy**2)
# check that wx,wy is really divergence-free
#check_divergence(vx,vy, wx,wy)
# plot (vx,vy) and (wx,wy)
#demo_plot(vx, vy, wx, wy)
# save data (before divergence cleaning)
saveXsm(options.filename+'_vx'+str(fileId), vx)
saveXsm(options.filename+'_vy'+str(fileId), vy)
# save data (after divergence cleaning)
saveXsm(options.filename+'_wx'+str(fileId), wx)
saveXsm(options.filename+'_wy'+str(fileId), wy)
| gpl-2.0 |
thekovinc/ds-for-wall-street | tsanalysis/TopVolatileStocks.py | 4 | 2123 | import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import seaborn as sns
datetimeidx = pd.date_range('8/22/2015', periods=5, freq='D')
volatile_tickers = [('one', np.array([0.1, 0.2, 0.3, 1.0, 1.1])),
('two', np.array([0.9, 0.6, 2.1, 1.1, 1.4])),
('three', np.array([0.8, 0.5, 0.1, 1.2, 1.5]))
]
ticker_vol_aug_24 = [('one', 1.0),
('two', 2.0),
('three', 3.0),
('four', 1.0),
]
def plot_volatility_over_time(ticker_df):
"""Plots volatility of list of stocks over time. Not a great plot when
there are many stocks.
Parameters
----------
ticker_df - dataframe of stock time series where columns are stock
prices and the rows are observations at a specific time.
Returns
-------
Displays a plot of volatility over time.
"""
ticker_df.plot()
print("Printing volatility over time plot.")
plt.savefig("img/vol_over_time.png")
plt.close()
def plot_joint_dist(ticker_df):
g = sns.PairGrid(ticker_df)
g.map(plt.scatter)
print("Printing joint distribution")
sns.plt.savefig('img/joint-dist')
sns.plt.close()
def plot_kde_smoothed_joint_dist(ticker_vol):
grid = sns.PairGrid(ticker_vol)
grid.map_diag(sns.kdeplot)
grid.map_offdiag(sns.kdeplot, cmap="Blues_d", n_levels=6)
print("Printing kde smoothed plot.")
sns.plt.savefig('img/kde-smoothed-pair-plot')
sns.plt.close()
def top_10(ticker_vol_pairs):
"""Returns the top ten ticker symbols by volatility."""
sorted_by_vol = sorted(ticker_vol_pairs, key=lambda x: x[1], reverse=True)
ticker_symbols = zip(*sorted_by_vol[:10])[0]
return ticker_symbols
if __name__ == "__main__":
ticker_df = pd.DataFrame.from_items(volatile_tickers)
indexed_ticker_df = ticker_df.set_index(datetimeidx)
plot_volatility_over_time(indexed_ticker_df)
plot_joint_dist(indexed_ticker_df)
plot_kde_smoothed_joint_dist(indexed_ticker_df)
| apache-2.0 |
jungla/ICOM-fluidity-toolbox | Detectors/plot_Poincare_v.py | 1 | 2368 | #!~/python
import fluidity_tools
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import numpy as np
import vtktools
import myfun
import os
from scipy import interpolate
import gc
gc.enable()
exp = 'r_5k_B_d10_particles'
filename = '/tamay2/mensa/fluidity/'+exp+'/particles.detectors'
filename2 = '/tamay2/mensa/fluidity/'+exp+'/ring_10.pvtu'
data = vtktools.vtu(filename2)
coords = data.GetLocations()
depths = sorted(list(set(coords[:,2])))
Xlist = np.linspace(-180000,180000,50)# x co-ordinates of the desired array shape
Ylist = np.arange(0,1)*0.0
Zlist = np.linspace(0,-900,20)# y co-ordinates of the desired array shape
[X,Y,Z] = myfun.meshgrid2(Xlist,Ylist,Zlist)
Y = np.reshape(Y,(np.size(Y),))
X = np.reshape(X,(np.size(X),))
Z = np.reshape(Z,(np.size(Z),))
pts = vtktools.arr(zip(X,Y,Z))
R = data.ProbeData(pts, 'Density_CG')
rho = np.reshape(R,[len(Zlist),len(Ylist),len(Xlist)])
try: os.stat('./plot/'+exp)
except OSError: os.mkdir('./plot/'+exp)
print 'reading detectors'
det = fluidity_tools.stat_parser(filename)
keys = det.keys() # particles
print 'done.'
pt = int(os.popen('grep position '+filename+'| wc -l').read()) # read the number of particles grepping all the positions in the file
tt = 45000
print 'particles:',pt
print 'timesteps:',tt
#z = range(-10,-890,-10)
#x = range(-100000,100000,3000)
#y = 0.0
par = np.zeros((pt,3,tt))
time = range(3600,3600*(tt+1),3600)
# read particles
print 'reading particles'
for d in range(pt):
temp = det['particles_'+myfun.digit(d+1,3)]['position']
par[d,:,:] = temp[:,0:tt]
point = []
for t in xrange(2,tt-2):
for d in xrange(pt):
if par[d,1,t]*par[d,1,t-1] < 0.0:
# print par[d,0,t],par[d,1,t],par[d,2,t],par[d,0,t-1],par[d,1,t-1],par[d,2,t-1],
f0 = interpolate.griddata(par[d,1,t-2:t+2],par[d,0,t-2:t+2],0.0,method='cubic')
f2 = interpolate.griddata(par[d,1,t-2:t+2],par[d,2,t-2:t+2],0.0,method='cubic')
point.append([float(f0),float(f2)])
apoint = np.asarray(point)
plt.figure()
plt.contour(Xlist,Zlist,np.squeeze(rho),20,colors=[0.5,0.5,0.5])
plt.scatter(apoint[:,0],apoint[:,1],marker='.', s=5, facecolor='0', lw = 0)
plt.ylim([-1000,0])
plt.xlim([-150000,150000])
#plt.scatter(par[:,0,999],par[:,2,999])
plt.savefig('./plot/'+exp+'/Poincare_'+exp+'.eps',bbox_inches='tight')
print 'saving','./plot/'+exp+'/Poincare_'+exp+'.eps'
plt.close()
| gpl-2.0 |
armatita/WOSS | woss.py | 1 | 6232 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 25 18:26:02 2014
@author: pedro.correia
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
def do_linear_transform(x,minimum,maximum):
dmin = x.min()
dmax = x.max()
data = (x-dmin)*(maximum-minimum)/(dmax-dmin)+minimum
return data
def interpolate(x,y):
fx = interp1d(y,x,kind='cubic')
xf = np.float_(fx(np.linspace(y.min(),y.max(),1000)))
return xf
def do_pearson_correlation(a,b):
return np.sum((a-a.mean())*(b-b.mean()))/np.sqrt(np.sum((a-a.mean())**2)*np.sum((b-b.mean())**2))
def do_reflective_similarity(a,b):
return np.sum(a*b)/np.sqrt(np.sum(a**2)*np.sum(b**2))
def do_quasi_correlation(a,b):
return 2*np.sum(a*b)/(np.sum(a**2)+np.sum(b**2))
def do_squared_quasi_correlation(a,b):
return (2*np.sum(a*b)/(np.sum(a**2)+np.sum(b**2)))**2
def do_spearman_correlation(a,b):
return np.sum((a-a.mean())*(b-b.mean()))/np.sqrt(np.sum((a-a.mean())**2)*np.sum((b-b.mean())**2))
def do_cosine_correlation(a,b):
return np.sum(a*b)/np.sqrt(np.sum(a**2)*np.sum(b**2))
def convolve_trace(trace,wavelet):
reflectivity = trace[:,-1].copy()
for i in xrange(trace.shape[0]-1):
reflectivity[i]=(reflectivity[i+1]-reflectivity[i])/(reflectivity[i+1]+reflectivity[i])
reflectivity[-1]=0
synthetic = trace[:,-1].copy()
synthetic[:] = 0
h_size=(wavelet.shape[0]-1)/2
for i in xrange(trace.shape[0]):
if i-h_size<0:
wa=h_size-i
a=0
else:
wa=0
a=i-h_size
if i+h_size>trace.shape[0]:
wb=h_size+i-trace.shape[0]
b=trace.shape[0]
else:
wb=2*h_size+1
b=i+h_size
synthetic[a:b]=synthetic[a:b]+reflectivity[i]*wavelet[wa:(2*h_size-wb)]
return synthetic
def convolve_trace2(trace,wavelet):
reflectivity = trace[:,-1].copy()
for i in xrange(trace.shape[0]-1):
reflectivity[i]=(reflectivity[i+1]-reflectivity[i])/(reflectivity[i+1]+reflectivity[i])
reflectivity[-1]=0
synthetic = trace[:,-1].copy()
synthetic[:] = 0
h_size=(wavelet.shape[0]-1)/2
for i in xrange(trace.shape[0]):
if i-h_size<0:
wa=h_size-i
a=0
else:
wa=0
a=i-h_size
if i+h_size>trace.shape[0]:
wb=h_size+i-trace.shape[0]
b=trace.shape[0]
else:
wb=2*h_size+1
b=i+h_size
synthetic[a:b]=synthetic[a:b]+reflectivity[i]*wavelet[wa:(2*h_size-wb),-1]
return synthetic
def do_ricker_wavelet(f, size, dt=1):
t = np.int_(np.linspace(-size, size, (2*size+1)/dt))
y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2))
data = np.hstack((t[:,np.newaxis],y[:,np.newaxis]))
return data
def woss(well_paths,wavelet_path,wavelet_size,seismic_path,ites=1000,f=0.3,size=29):
well_book = {}
seismic_book = {}
seismic = np.load(seismic_path)
c=0
initial_wavelet = np.loadtxt(wavelet_path)
ricker_wavelet = do_ricker_wavelet(f,size)
awavelet = np.zeros((ricker_wavelet.shape[0],2))
awavelet[:,0] = np.linspace(initial_wavelet[:,0].min(),initial_wavelet[:,0].max(),2*size+1)
awavelet[:,1] = do_linear_transform(ricker_wavelet[:,1],initial_wavelet[:,1].min(),initial_wavelet[:,1].max())
initial_wavelet = awavelet.copy()
final_wavelet = np.zeros(initial_wavelet.shape)
final_wavelet[:,-1] = initial_wavelet[:,-1]
for i in well_paths:
well_book[c] = np.loadtxt(i)
seismic_book[c] = seismic[well_book[c][:,0].astype('int32'),well_book[c][:,1].astype('int32'),well_book[c][:,2].astype('int32')]
c=c+1
cc0 = 0
for t in well_book.keys():
tt = convolve_trace2(well_book[t],final_wavelet)
cc0 = cc0 + do_quasi_correlation(tt,seismic_book[t])
ccs = np.float(cc0)
ccf = np.float(cc0)
nt = []
for i in xrange(ites):
c = np.random.randint(0,initial_wavelet.shape[0])
p = np.random.triangular(initial_wavelet[:,-1].min(),final_wavelet[c,-1],initial_wavelet[:,-1].max())
appex = final_wavelet[c,-1]
final_wavelet[c,-1]=p
cc = 0
for t in well_book.keys():
tt = convolve_trace2(well_book[t],final_wavelet)
cc = cc + do_quasi_correlation(tt,seismic_book[t])
if cc>cc0:
cc0 = np.float(cc)
ccf = np.float(cc0)
nt.append(i)
else:
final_wavelet[c,-1] = appex
print 'START: ',ccs/len(well_book.keys()), ' - ONGOING: ',ccf/len(well_book.keys())
print 'DONE ITERATION ',i
print '#############################################'
print 'INITIAL MEAN CORRELATION WAS: ',ccs/len(well_book.keys())
print 'FINAL MEAN CORRELATION IS: ',ccf/len(well_book.keys())
plt.plot(initial_wavelet[:,1],initial_wavelet[:,0],color='red',linewidth=3,label='initial wavelet')
plt.plot(final_wavelet[:,1],initial_wavelet[:,0],color='black',linewidth=3,label='optimized wavelet')
plt.plot(interpolate(final_wavelet[:,1],initial_wavelet[:,0]),np.linspace(initial_wavelet[:,0].min(),initial_wavelet[:,0].max(),1000),color='green',linewidth=3,label='interpolation')
plt.fill_betweenx(initial_wavelet[:,0],initial_wavelet[:,1],final_wavelet[:,1],color='pink',alpha=0.3)
plt.xlim(initial_wavelet[:,1].min(),initial_wavelet[:,1].max())
plt.ylim(initial_wavelet[:,0].min(),initial_wavelet[:,0].max())
x=[]
y=[]
z=[]
for i in xrange(initial_wavelet[:,0].shape[0]):
y.append(initial_wavelet[i,0])
y.append(initial_wavelet[i,0])
z.append(initial_wavelet[i,1]-final_wavelet[i,1])
z.append(initial_wavelet[i,1]-final_wavelet[i,1])
x.append(initial_wavelet[i,1])
x.append(final_wavelet[i,1])
plt.legend()
plt.grid()
plt.show()
######### THIS IS HOW I WOULD LAUNCH THIS ALGORITHM ############
#well_paths = ['logs_AI_1.prn','logs_AI_2.prn']
#wavelet_path = 'wavelet.txt'
#wavelet_size = 19
#seismic_path = 'seismic.npy'
#woss(well_paths,wavelet_path,wavelet_size,seismic_path,ites=10000)
| mit |
KonradBreitsprecher/espresso | doc/tutorials/08-visualization/scripts/plotting.py | 1 | 3315 | from __future__ import print_function
import numpy
from matplotlib import pyplot
from threading import Thread
import espressomd
from espressomd import visualization
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 100
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
system.analysis.dist_to(0)
act_min_dist = system.analysis.mindist()
system.cell_system.max_num_cells = 2744
#############################################################
# Warmup Integration #
#############################################################
# set LJ cap
lj_cap = 20
system.force_cap = lj_cap
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
system.integrator.run(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.mindist()
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.force_cap = lj_cap
#############################################################
# Integration #
#############################################################
# remove force capping
lj_cap = 0
system.force_cap = lj_cap
energies = numpy.empty((int_n_times, 2))
current_time = -1
pyplot.xlabel("time")
pyplot.ylabel("energy")
plot, = pyplot.plot([0],[0])
pyplot.show(block=False)
def update_plot():
if current_time < 0:
return
i = current_time
plot.set_xdata(energies[:i+1,0])
plot.set_ydata(energies[:i+1,1])
pyplot.xlim(0, energies[i,0])
pyplot.ylim(energies[:i+1,1].min(), energies[:i+1,1].max())
pyplot.draw()
pyplot.pause(0.01)
def main():
global current_time
for i in range(0, int_n_times):
print("run %d at time=%f " % (i, system.time))
system.integrator.run(int_steps)
energies[i] = (system.time, system.analysis.energy()['total'])
current_time = i
update_plot()
main()
print("Average energy: %6g" % energies[:,1].mean())
# terminate program
print("\nFinished.")
| gpl-3.0 |
carrillo/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
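# A minimal usage sketch of the ``scorer(estimator, X, y)`` convention described
# in the module docstring, assuming a fitted classifier ``clf`` and held-out
# arrays ``X_test`` and ``y_test``:
#
#     from sklearn.metrics.scorer import get_scorer
#
#     scorer = get_scorer('accuracy')      # same object as SCORERS['accuracy']
#     score = scorer(clf, X_test, y_test)  # calls clf.predict internally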
| bsd-3-clause |
HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/base.py | 4 | 19070 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import types
import numpy as np
import six
from six import string_types
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io.data_feeder import setup_train_data_feeder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
def _write_with_backup(filename, content):
if gfile.Exists(filename):
gfile.Rename(filename, filename + '.old', overwrite=True)
with gfile.Open(filename, 'w') as f:
f.write(content)
def _copy_dir(dir_in, dir_out):
gfile.MakeDirs(dir_out)
for name in gfile.ListDirectory(dir_in):
name_in = os.path.join(dir_in, name)
name_out = os.path.join(dir_out, name)
if gfile.IsDirectory(name_in):
gfile.MakeDirs(name_out)
_copy_dir(name_in, name_out)
else:
gfile.Copy(name_in, name_out, overwrite=True)
class TensorFlowEstimator(estimator.Estimator):
"""Base class for all TensorFlow estimators."""
def __init__(self,
model_fn,
n_classes,
batch_size=32,
steps=200,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
class_weight=None,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowEstimator instance.
Args:
      model_fn: Model function that takes input `x`, `y` tensors and outputs
        prediction and loss tensors.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
      learning_rate: If this is a constant float value, no decay function is used.
Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
````
clip_gradients: Clip norm of the gradients to this value to stop
gradient explosion.
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are supposed to
have weight one.
      continue_training: when continue_training is True, the model, once
        initialized, will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
* 0: the algorithm and debug information is muted.
* 1: trainer prints the progress.
* 2: log device placement is printed.
"""
self.class_weight = class_weight
self.learning_rate = learning_rate
self.clip_gradients = clip_gradients
if isinstance(optimizer, six.string_types):
if optimizer not in layers.OPTIMIZER_CLS_NAMES:
raise ValueError(
'Optimizer name should be one of [%s], you provided %s.' %
(', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer))
self.optimizer = optimizer
super(TensorFlowEstimator, self).__init__(
model_fn=self._get_model_fn(model_fn),
config=config)
self.n_classes = n_classes
self.batch_size = batch_size
self.steps = steps
self.verbose = verbose
self.continue_training = continue_training
self._data_feeder = None
def fit(self, x, y, steps=None, monitors=None, logdir=None):
"""Neural network model from provided `model_fn` and training data.
    Note: the first call constructs the graph and initializes variables.
    Consecutive calls will continue training the same model. This logic
    follows the partial_fit() interface in scikit-learn. To restart
    learning, create a new estimator.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
steps: int, number of steps to train.
If None or 0, train for `self.steps`.
monitors: List of `BaseMonitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for
optional visualization.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = setup_train_data_feeder(
x, y, n_classes=self.n_classes, batch_size=self.batch_size)
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors)
return self
def evaluate(self, x=None, y=None, input_fn=None, steps=None):
"""See base class."""
feed_fn = None
if x is not None:
eval_data_feeder = setup_train_data_feeder(
x, y, n_classes=self.n_classes, batch_size=self.batch_size, epochs=1)
input_fn, feed_fn = (eval_data_feeder.input_builder,
eval_data_feeder.get_feed_dict_fn())
return self._evaluate_model(
input_fn=input_fn, feed_fn=feed_fn, steps=steps or self.steps)
def partial_fit(self, x, y):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can implement
    either iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long
    time to converge and you want to split up training into subparts.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
Returns:
Returns self.
"""
return self.fit(x, y)
def _predict(self, x, axis=-1, batch_size=None):
if self._graph is None:
raise NotFittedError()
# Use the batch size for fitting if the user did not specify one.
if batch_size is None:
batch_size = self.batch_size
predict_data_feeder = setup_train_data_feeder(
x, None, n_classes=None,
batch_size=batch_size,
shuffle=False, epochs=1)
preds = self._infer_model(
input_fn=predict_data_feeder.input_builder,
feed_fn=predict_data_feeder.get_feed_dict_fn())
if self.n_classes > 1 and axis != -1:
preds = preds.argmax(axis=axis)
else:
preds = preds
return preds
def predict(self, x, axis=1, batch_size=None):
"""Predict class or regression for `x`.
For a classification model, the predicted class for each sample in `x` is
returned. For a regression model, the predicted value based on `x` is
returned.
Args:
x: array-like matrix, [n_samples, n_features...] or iterator.
axis: Which axis to argmax for classification.
By default axis 1 (next after batch) is used.
Use 2 for sequence predictions.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member
variable is used.
Returns:
y: array of shape [n_samples]. The predicted classes or predicted
value.
"""
return self._predict(x, axis=axis, batch_size=batch_size)
def predict_proba(self, x, batch_size=None):
"""Predict class probability of the input samples `x`.
Args:
x: array-like matrix, [n_samples, n_features...] or iterator.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member variable is used.
Returns:
y: array of shape [n_samples, n_classes]. The predicted
probabilities for each class.
"""
return self._predict(x, batch_size=batch_size)
def get_tensor(self, name):
"""Returns tensor by name.
Args:
name: string, name of the tensor.
Returns:
Tensor.
"""
if self._graph is None:
raise NotFittedError
return self._graph.get_tensor_by_name(name)
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
if self._graph is None:
raise NotFittedError
# Copy model dir into new path.
_copy_dir(self.model_dir, path)
# Save model definition.
all_params = self.get_params()
params = {}
for key, value in all_params.items():
if not callable(value) and value is not None:
params[key] = value
params['class_name'] = type(self).__name__
model_def = json.dumps(
params,
default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
_write_with_backup(os.path.join(path, 'model.def'), model_def)
def _restore(self, path):
"""Restores this estimator from given path.
Note: will rebuild the graph and initialize all parameters,
and will ignore provided model.
Args:
path: Path to checkpoints and other information.
"""
raise NotImplementedError
@classmethod
def restore(cls, path, config=None):
# pylint: disable=unused-argument
"""Restores model from give path.
Args:
path: Path to the checkpoints and other model information.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be
reconfigured.
Returns:
Estimator, object of the subclass of TensorFlowEstimator.
Raises:
ValueError: if `path` does not contain a model definition.
"""
model_def_filename = os.path.join(path, 'model.def')
if not os.path.exists(model_def_filename):
raise ValueError("Restore folder doesn't contain model definition.")
# list of parameters that are allowed to be reconfigured
reconfigurable_params = ['_config']
_config = config # pylint: disable=unused-variable,invalid-name
with gfile.Open(model_def_filename) as fmodel:
model_def = json.loads(fmodel.read())
# TensorFlow binding requires parameters to be strings not unicode.
# Only issue in Python2.
for key, value in model_def.items():
if isinstance(value, string_types) and not isinstance(value, str):
model_def[key] = str(value)
if key in reconfigurable_params:
new_value = locals()[key]
if new_value is not None:
model_def[key] = new_value
class_name = model_def.pop('class_name')
if class_name == 'TensorFlowEstimator':
custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
# pylint: disable=protected-access
custom_estimator._restore(path)
return custom_estimator
# To avoid cyclical dependencies, import inside the function instead of
# the beginning of the file.
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python.learn import estimators
# Estimator must be one of the defined estimators in the __init__ file.
result = getattr(estimators, class_name)(**model_def)
# pylint: disable=protected-access
result._restore(path)
return result
def _get_model_fn(self, model_fn):
"""Backward compatibility way of adding class weight and IS_TRAINING.
TODO(ipolosukhin): Remove this function after new layers are available.
Specifically:
* dropout and batch norm should work via update ops.
* class weights should be retrieved from weights column or hparams.
Args:
model_fn: Core model function.
Returns:
Model function.
"""
def _model_fn(features, targets, mode):
"""Model function."""
ops.get_default_graph().add_to_collection('IS_TRAINING', mode == 'train')
if self.class_weight is not None:
constant_op.constant(self.class_weight, name='class_weight')
predictions, loss = model_fn(features, targets)
if isinstance(self.learning_rate, types.FunctionType):
learning_rate = self.learning_rate(contrib_framework.get_global_step())
else:
learning_rate = self.learning_rate
if isinstance(self.optimizer, types.FunctionType):
optimizer = self.optimizer(learning_rate)
else:
optimizer = self.optimizer
train_op = layers.optimize_loss(
loss,
contrib_framework.get_global_step(),
learning_rate=learning_rate,
optimizer=optimizer,
clip_gradients=self.clip_gradients)
return predictions, loss, train_op
return _model_fn
class TensorFlowBaseTransformer(TensorFlowEstimator, _sklearn.TransformerMixin):
"""TensorFlow Base Transformer class."""
def transform(self, x):
"""Transform `x` using trained transformer."""
return(super(TensorFlowBaseTransformer, self).predict(
x, axis=1, batch_size=None))
def fit(self, x, y=None, monitor=None, logdir=None):
"""Fit a transformer."""
return(super(TensorFlowBaseTransformer, self).fit(
x, y, monitors=None, logdir=None))
def fit_transform(self, x, y=None, monitor=None, logdir=None):
"""Fit transformer and transform `x` using trained transformer."""
return self.fit(x, y, monitor=None, logdir=None).transform(x)
class DeprecatedMixin(object):
"""This is mixin for deprecated TensorFlowYYY classes."""
def __init__(self, *args, **kwargs):
this_class = type(self).__name__
alternative_class = this_class[len('TensorFlow'):]
logging.warning(
'%s class is deprecated. Please consider using %s as an alternative.',
this_class, alternative_class)
# Handle deprecated arguments.
self.__deprecated_n_classes = kwargs.get('n_classes', 0)
if self.__deprecated_n_classes < 1 and 'n_classes' in kwargs:
kwargs.pop('n_classes')
self.batch_size = kwargs.pop('batch_size', 32)
self.steps = kwargs.pop('steps', 200)
if 'optimizer' in kwargs or 'learning_rate' in kwargs:
self.learning_rate = kwargs.pop('learning_rate', 0.1)
self.optimizer = kwargs.pop('optimizer', 'Adagrad')
if isinstance(self.learning_rate, types.FunctionType):
raise ValueError('Function-like learning_rate are not supported '
'consider using custom Estimator.')
else:
learning_rate = self.learning_rate
if isinstance(self.optimizer, types.FunctionType):
optimizer = self.optimizer(learning_rate)
elif isinstance(self.optimizer, six.string_types):
optimizer = layers.OPTIMIZER_CLS_NAMES[self.optimizer](learning_rate)
else:
optimizer = self.optimizer
kwargs['optimizer'] = optimizer
if 'class_weight' in kwargs:
raise ValueError('Sorry we switched interface for providing class '
'weights. Please use weight column instead which '
'provides more granular control (per example).')
if 'clip_gradients' in kwargs:
logging.warning('clip_gradients argument in %s is now converted to '
'gradient_clip_norm.' % this_class)
kwargs['gradient_clip_norm'] = kwargs.pop('clip_gradients')
else:
kwargs['gradient_clip_norm'] = 5.0
if 'continue_training' in kwargs:
logging.warning('continue_training argument in %s is now ignored.' %
this_class)
kwargs.pop('continue_training')
if 'verbose' in kwargs:
logging.warning('verbose argument in %s is now ignored.' %
this_class)
kwargs.pop('verbose')
super(DeprecatedMixin, self).__init__(*args, **kwargs)
def fit(self, x, y, steps=None, batch_size=None, monitors=None, logdir=None):
if logdir is not None:
self._model_dir = logdir
return super(DeprecatedMixin, self).fit(
x=x, y=y, steps=steps or self.steps,
batch_size=batch_size or self.batch_size, monitors=monitors)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
axis=1):
"""Predict class or regression for `x`."""
if x is not None:
predict_data_feeder = setup_train_data_feeder(
x, None, n_classes=None,
batch_size=batch_size or self.batch_size,
shuffle=False, epochs=1)
result = super(DeprecatedMixin, self)._infer_model(
input_fn=predict_data_feeder.input_builder,
feed_fn=predict_data_feeder.get_feed_dict_fn(),
outputs=outputs)
else:
result = super(DeprecatedMixin, self)._infer_model(
input_fn=input_fn, outputs=outputs)
if self.__deprecated_n_classes > 1 and axis is not None:
return np.argmax(result, axis)
return result
def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None):
return self.predict(x=x, input_fn=input_fn, batch_size=batch_size,
outputs=outputs, axis=None)
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
# Copy model dir into new path.
_copy_dir(self.model_dir, path)
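# A minimal sketch of a model_fn satisfying the contract documented in
# TensorFlowEstimator.__init__ (takes the input and target tensors, returns a
# (predictions, loss) pair for _get_model_fn to wrap). The linear model, the
# variable names and n_classes=0 (regression) are illustrative assumptions:
#
#   import tensorflow as tf
#
#   def linear_model_fn(x, y):
#       n_features = x.get_shape()[1].value
#       w = tf.Variable(tf.zeros([n_features, 1]), name='weights')
#       b = tf.Variable(tf.zeros([1]), name='bias')
#       predictions = tf.matmul(x, w) + b
#       loss = tf.reduce_mean(tf.square(predictions - y))
#       return predictions, loss
#
#   estimator = TensorFlowEstimator(model_fn=linear_model_fn, n_classes=0)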
| apache-2.0 |
rvraghav93/scikit-learn | sklearn/neighbors/unsupervised.py | 7 | 4756 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
majetideepak/arrow | python/pyarrow/tests/test_hdfs.py | 3 | 13419 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.compat import guid
from pyarrow.pandas_compat import _pandas_api
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client(driver='libhdfs'):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
return pa.hdfs.connect(host, port, user, driver=driver)
@pytest.mark.hdfs
class HdfsTestCases(object):
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client(cls.DRIVER)
cls.tmp_path = '/tmp/pyarrow-test-{0}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_unknown_driver(self):
with pytest.raises(ValueError):
hdfs_test_client(driver="not_a_driver_name")
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
assert h2.driver == self.hdfs.driver
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
result = pq.read_table(path)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(path, filesystem=self.hdfs).to_pandas()
_pandas_api.assert_frame_equal(result, df)
@pytest.mark.parquet
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
pytest.skip('No libhdfs available on system')
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
class TestLibHdfs3(HdfsTestCases, unittest.TestCase):
DRIVER = 'libhdfs3'
@classmethod
def check_driver(cls):
if not pa.have_libhdfs3():
pytest.skip('No libhdfs3 available on system')
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.fastparquet
@pytest.mark.parametrize('client', ['libhdfs', 'libhdfs3'])
def test_fastparquet_read_with_hdfs(client):
from pandas.util.testing import assert_frame_equal, makeDataFrame
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client(client)
df = makeDataFrame()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
assert_frame_equal(result, df)
| apache-2.0 |
toastedcornflakes/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 63 | 2945 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
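# --- Editor's aside: a small, hedged sanity check (not part of the original
# example). With ``positive=True`` the coordinate-descent solver constrains
# every coefficient along the regularization path to be non-negative, which
# is why the "positive" paths plotted below never cross zero.
assert np.all(coefs_positive_lasso >= 0)
assert np.all(coefs_positive_enet >= 0)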
# Display results
plt.figure(1)
ax = plt.gca()
colors = cycle(['b', 'r', 'g', 'c', 'k'])
neg_log_alphas_lasso = -np.log10(alphas_lasso)
neg_log_alphas_enet = -np.log10(alphas_enet)
for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model with the modified sample weights and, for reference,
# an unweighted model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
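# --- Editor's aside: a hedged illustration (not part of the original
# example). ``sample_weight`` only rescales C per sample, so fitting with the
# constant all-ones weight vector is equivalent to the unweighted fit; the
# decision values coincide on the training points. The name
# ``clf_constant_weights`` is an illustrative addition.
clf_constant_weights = svm.SVC()
clf_constant_weights.fit(X, Y, sample_weight=sample_weight_constant)
assert np.allclose(clf_no_weights.decision_function(X),
clf_constant_weights.decision_function(X))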
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
weidel-p/nest-simulator | pynest/examples/one_neuron.py | 2 | 3809 | # -*- coding: utf-8 -*-
#
# one_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron example
------------------
This script simulates a neuron driven by a constant external current
and records its membrane potential.
See Also
~~~~~~~~
:doc:`twoneurons`
"""
#######################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages and reset the kernel.
# Resetting the kernel allows you to execute the script several
# times in a Python shell without interferences from previous NEST
# simulations. Thus, without resetting the kernel the network status
# including connections between nodes, status of neurons, devices and
# intrinsic time clocks, is kept and influences the next simulations.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
#######################################################################
# Second, the nodes (neurons and devices) are created using ``Create``.
# We store the returned handles in variables for later reference.
# The ``Create`` function also allows you to create multiple nodes,
# e.g. ``nest.Create('iaf_psc_alpha', 5)``.
# Default parameters of the model can also be configured via ``Create``
# by including a list of parameter dictionaries,
# e.g. ``nest.Create("iaf_psc_alpha", params=[{'I_e': 376.0}])``.
# In this example we will configure these parameters in an additional
# step, which is explained in the third section.
neuron = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
#######################################################################
# Third, we configure the neuron by setting its constant input current.
# Parameters can be assigned directly as node attributes (as done below)
# or with ``SetStatus()``, which expects a list of node handles and a
# list of parameter dictionaries.
neuron.I_e = 376.0
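# Editor's aside (hedged, not part of the original script): the same
# parameter could equivalently be set with the dictionary-based call
# ``nest.SetStatus(neuron, {"I_e": 376.0})``.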
#######################################################################
# Fourth, the neuron is connected to the voltmeter. The command
# ``Connect`` has different variants. Plain ``Connect`` just takes the
# handles of pre- and post-synaptic nodes and uses the default values
# for weight and delay. Note that the connection direction for the voltmeter is
# reversed compared to the spike detector, because it observes the
# neuron instead of receiving events from it. Thus, ``Connect``
# reflects the direction of signal flow in the simulation kernel
# rather than the physical process of inserting an electrode into the
# neuron. The latter semantics is presently not available in NEST.
nest.Connect(voltmeter, neuron)
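# Editor's aside (hedged, not part of the original script): for a recording
# device that receives events from the neuron -- for example the spike
# detector used in other NEST examples -- the arguments would be reversed,
# e.g. ``nest.Connect(neuron, spike_detector)``, where ``spike_detector``
# is a hypothetical handle obtained from ``nest.Create``.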
#######################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
#######################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time and display the plot using pyplot.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| gpl-2.0 |
selective-inference/selective-inference | selectinf/randomized/tests/sandbox/test_opt_weighted_intervals.py | 3 | 2290 | from itertools import product
import numpy as np
import nose.tools as nt
from ..convenience import lasso, step, threshold
from ..query import optimization_sampler
from ...tests.instance import (gaussian_instance,
logistic_instance,
poisson_instance)
from ...tests.flags import SMALL_SAMPLES
from ...tests.decorators import set_sampling_params_iftrue, set_seed_iftrue
import matplotlib.pyplot as plt
from scipy.stats import t as tdist
from ..glm import glm_nonparametric_bootstrap, pairs_bootstrap_glm
from ..M_estimator import restricted_Mest
@set_seed_iftrue(False, 200)
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=1000, burnin=100)
def test_opt_weighted_intervals(ndraw=20000, burnin=2000):
results = []
cls = lasso
for const_info, rand in product(zip([gaussian_instance], [cls.gaussian]), ['laplace', 'gaussian']):
inst, const = const_info
X, Y, beta = inst(n=100, p=20, s=0, signal=5., sigma=5.)[:3]
n, p = X.shape
W = np.ones(X.shape[1]) * 8
conv = const(X, Y, W, randomizer=rand, parametric_cov_estimator=True)
signs = conv.fit()
print("signs", signs)
marginalizing_groups = np.ones(p, np.bool)
#marginalizing_groups[:int(p/2)] = True
conditioning_groups = ~marginalizing_groups
#conditioning_groups[-int(p/4):] = False
conv.decompose_subgradient(marginalizing_groups=marginalizing_groups,
conditioning_groups=conditioning_groups)
selected_features = conv._view.selection_variable['variables']
nactive=selected_features.sum()
print("nactive", nactive)
if nactive==0:
results.append(None)
else:
sel_pivots, sel_pval, sel_ci = conv.summary(selected_features,
parameter=beta[selected_features],
ndraw=ndraw,
burnin=burnin,
compute_intervals=True)
print(sel_pivots)
results.append((rand, sel_pivots, sel_ci, beta[selected_features]))
return results
| bsd-3-clause |
JWDebelius/scikit-bio | skbio/stats/tests/test_gradient.py | 1 | 54772 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.utils.six import StringIO
from future.builtins import zip
from operator import attrgetter
from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
import pandas.util.testing as pdt
from skbio.util import get_data_path
from skbio.stats.gradient import (GradientANOVA, AverageGradientANOVA,
TrajectoryGradientANOVA,
FirstDifferenceGradientANOVA,
WindowDifferenceGradientANOVA, GroupResults,
CategoryResults, GradientANOVAResults,
_weight_by_vector, _ANOVA_trajectories)
class BaseTests(TestCase):
def setUp(self):
"""Initializes some data for testing"""
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368, 0.03532727349,
-0.254450494129, -0.0687468542543,
0.231895596562, 0.00496549154314,
-0.0026246871695, 9.73837390723e-10]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992, 0.0957182357964,
0.204714844022, -0.0055407341857,
-0.190287966833, 0.16307126638,
9.73837390723e-10]),
'PC.356': np.array([0.220886492631, 0.0874848360559,
-0.351990132198, -0.00316535032886,
0.114635191853, -0.00019194106125,
0.188557853937, 0.030002427212,
9.73837390723e-10]),
'PC.481': np.array([0.0308923744062, -0.0446295973489,
0.133996451689, 0.29318228566, -0.167812539312,
0.130996149793, 0.113551017379, 0.109987942454,
9.73837390723e-10]),
'PC.354': np.array([0.27616778138, -0.0341866951102,
0.0633000238256, 0.100446653327,
0.123802521199, 0.1285839664, -0.132852841046,
-0.217514322505, 9.73837390723e-10]),
'PC.593': np.array([0.202458130052, -0.115216120518,
0.301820871723, -0.18300251046, 0.136208248567,
-0.0989435556722, 0.0927738484879,
0.0909429797672, 9.73837390723e-10]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424, -0.0225473129718,
-0.205287183891, -0.180224615141,
-0.165277751908, 0.0411933458557,
9.73837390723e-10]),
'PC.607': np.array([-0.105517545144, -0.41405687433,
-0.150073017617, -0.116066751485,
-0.158763393475, -0.0223918378516,
-0.0263068046112, -0.0501209518091,
9.73837390723e-10]),
'PC.634': np.array([-0.371636765565, 0.115484234741,
0.0721996475289, 0.0898852445906,
0.0212491652909, -0.184183028843,
0.114877153051, -0.164938000185,
9.73837390723e-10])
}
self.coords = pd.DataFrame.from_dict(coord_data, orient='index')
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368,
0.03532727349]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'PC.356': np.array([0.220886492631, 0.0874848360559,
-0.351990132198]),
'PC.481': np.array([0.0308923744062, -0.0446295973489,
0.133996451689]),
'PC.354': np.array([0.27616778138, -0.0341866951102,
0.0633000238256]),
'PC.593': np.array([0.202458130052, -0.115216120518,
0.301820871723]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424]),
'PC.607': np.array([-0.105517545144, -0.41405687433,
-0.150073017617]),
'PC.634': np.array([-0.371636765565, 0.115484234741,
0.0721996475289])
}
self.coords_3axes = pd.DataFrame.from_dict(coord_data, orient='index')
metadata_map = {'PC.354': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '60',
'Description': 'Control_mouse_I.D._354'},
'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'PC.356': {'Treatment': 'Control',
'DOB': '20061126',
'Weight': '50',
'Description': 'Control_mouse_I.D._356'},
'PC.481': {'Treatment': 'Control',
'DOB': '20070314',
'Weight': '52',
'Description': 'Control_mouse_I.D._481'},
'PC.593': {'Treatment': 'Control',
'DOB': '20071210',
'Weight': '57',
'Description': 'Control_mouse_I.D._593'},
'PC.607': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'},
'PC.634': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '68',
'Description': 'Fasting_mouse_I.D._634'},
'PC.635': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '70',
'Description': 'Fasting_mouse_I.D._635'},
'PC.636': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '72',
'Description': 'Fasting_mouse_I.D._636'}}
self.metadata_map = pd.DataFrame.from_dict(metadata_map,
orient='index')
self.prop_expl = np.array([25.6216900347, 15.7715955926,
14.1215046787, 11.6913885817, 9.83044890697,
8.51253468595, 7.88775505332, 6.56308246609,
4.42499350906e-16])
gr_wo_msg = GroupResults('Foo', np.array([-2.6750, -0.2510,
-2.8322, 0.]),
-1.4398, {'mean': -1.4398, 'std': 1.3184},
None)
gr_w_msg = GroupResults('Bar', np.array([9.6823, 2.9511, 5.2434]),
5.9589, {'mean': 5.9589, 'std': 2.7942},
"Cannot calculate the first difference "
"with a window of size (3).")
self.groups = [gr_wo_msg, gr_w_msg]
cr_no_data = CategoryResults('foo', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
cr_data = CategoryResults('bar', 0.0110, self.groups, None)
self.categories = [cr_no_data, cr_data]
vr = GradientANOVAResults('wdiff', True, self.categories)
description = CategoryResults('Description', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
weight = CategoryResults('Weight', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
dob = CategoryResults('DOB', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
control_group = GroupResults('Control', np.array([2.3694, 3.3716,
5.4452, 4.5704,
4.4972]),
4.0508, {'avg': 4.0508}, None)
fast_group = GroupResults('Fast', np.array([7.2220, 4.2726, 1.1169,
4.0271]),
4.1596, {'avg': 4.1596}, None)
treatment = CategoryResults('Treatment', 0.9331,
[control_group, fast_group], None)
vr_real = GradientANOVAResults('avg', False, [description, weight, dob,
treatment])
self.vec_results = [vr, vr_real]
# This function makes the comparisons between the results classes easier
def assert_group_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.name, exp.name)
npt.assert_almost_equal(obs.trajectory, exp.trajectory)
npt.assert_almost_equal(obs.mean, exp.mean)
self.assertEqual(obs.info.keys(), exp.info.keys())
for key in obs.info:
npt.assert_almost_equal(obs.info[key], exp.info[key])
self.assertEqual(obs.message, exp.message)
def assert_category_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.category, exp.category)
if exp.probability is None:
self.assertTrue(obs.probability is None)
self.assertTrue(obs.groups is None)
else:
npt.assert_almost_equal(obs.probability, exp.probability)
for o, e in zip(sorted(obs.groups, key=attrgetter('name')),
sorted(exp.groups, key=attrgetter('name'))):
self.assert_group_results_almost_equal(o, e)
def assert_gradientANOVA_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.algorithm, exp.algorithm)
self.assertEqual(obs.weighted, exp.weighted)
for o, e in zip(sorted(obs.categories, key=attrgetter('category')),
sorted(exp.categories, key=attrgetter('category'))):
self.assert_category_results_almost_equal(o, e)
class GradientTests(BaseTests):
def test_weight_by_vector(self):
"""Correctly weights the vectors"""
trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
's2': np.array([2]),
's3': np.array([3]),
's4': np.array([4]),
's5': np.array([5]),
's6': np.array([6]),
's7': np.array([7]),
's8': np.array([8])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([1, 5, 8, 12, 45, 80, 85, 90]),
['s1', 's2', 's3', 's4',
's5', 's6', 's7', 's8']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s1': np.array([1]),
's2': np.array([6.3571428571]),
's3': np.array([12.7142857142]),
's4': np.array([12.7142857142]),
's5': np.array([1.9264069264]),
's6': np.array([2.1795918367]),
's7': np.array([17.8]),
's8': np.array([20.3428571428])},
orient='index')
obs = _weight_by_vector(trajectory, w_vector)
pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
trajectory = pd.DataFrame.from_dict({'s1': np.array([1]),
's2': np.array([2]),
's3': np.array([3]),
's4': np.array([4]),
's5': np.array([5]),
's6': np.array([6]),
's7': np.array([7]),
's8': np.array([8])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([1, 2, 3, 4, 5, 6, 7, 8]),
['s1', 's2', 's3', 's4',
's5', 's6', 's7', 's8']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s1': np.array([1]), 's2': np.array([2]),
's3': np.array([3]), 's4': np.array([4]),
's5': np.array([5]), 's6': np.array([6]),
's7': np.array([7]), 's8': np.array([8])
},
orient='index')
obs = _weight_by_vector(trajectory, w_vector)
pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
trajectory = pd.DataFrame.from_dict({'s2': np.array([2]),
's3': np.array([3]),
's4': np.array([4]),
's5': np.array([5]),
's6': np.array([6])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([25, 30, 35, 40, 45]),
['s2', 's3', 's4', 's5', 's6']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s2': np.array([2]), 's3': np.array([3]),
's4': np.array([4]), 's5': np.array([5]),
's6': np.array([6])}, orient='index')
obs = _weight_by_vector(trajectory, w_vector)
pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
trajectory = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
's2': np.array([2, 3, 4]),
's3': np.array([5, 6, 7]),
's4': np.array([8, 9, 10])},
orient='index')
trajectory.sort(columns=0, inplace=True)
w_vector = pd.Series(np.array([1, 2, 3, 4]),
['s1', 's2', 's3', 's4']).astype(np.float64)
exp = pd.DataFrame.from_dict({'s1': np.array([1, 2, 3]),
's2': np.array([2, 3, 4]),
's3': np.array([5, 6, 7]),
's4': np.array([8, 9, 10])},
orient='index')
obs = _weight_by_vector(trajectory, w_vector)
pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
sample_ids = ['PC.356', 'PC.481', 'PC.355', 'PC.593', 'PC.354']
trajectory = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
1.37977545,
-4.9706303]),
'PC.481': np.array([0.79151484,
-0.70387996,
1.89223152]),
'PC.355': np.array([6.05869624,
3.44821245,
-0.42595788]),
'PC.593': np.array([5.18731945,
-1.81714206,
4.26216485]),
'PC.354': np.array([7.07588529,
-0.53917873,
0.89389158])
}, orient='index')
w_vector = pd.Series(np.array([50, 52, 55, 57, 60]),
sample_ids).astype(np.float64)
exp = pd.DataFrame.from_dict({'PC.356': np.array([5.65948525,
1.37977545,
-4.9706303]),
'PC.481': np.array([0.98939355,
-0.87984995,
2.3652894]),
'PC.355': np.array([5.04891353,
2.87351038,
-0.3549649]),
'PC.593': np.array([6.48414931,
-2.27142757,
5.32770606]),
'PC.354': np.array([5.89657108,
-0.44931561,
0.74490965])
}, orient='index')
obs = _weight_by_vector(trajectory.ix[sample_ids],
w_vector[sample_ids])
pdt.assert_frame_equal(obs.sort(axis=0), exp.sort(axis=0))
def test_weight_by_vector_single_element(self):
trajectory = pd.DataFrame.from_dict({'s1': np.array([42])},
orient='index')
w_vector = pd.Series(np.array([5]), ['s1']).astype(np.float64)
obs = _weight_by_vector(trajectory, w_vector)
pdt.assert_frame_equal(obs, trajectory)
def test_weight_by_vector_error(self):
"""Raises an error with erroneous inputs"""
# Different vector lengths
with self.assertRaises(ValueError):
_weight_by_vector([1, 2, 3, 4], [1, 2, 3])
# Inputs are not iterables
with self.assertRaises(TypeError):
_weight_by_vector(9, 1)
# Weighting vector is not a gradient
with self.assertRaises(ValueError):
_weight_by_vector([1, 2, 3, 4], [1, 2, 3, 3])
def test_ANOVA_trajectories(self):
"""Correctly performs the check before running ANOVA"""
# Only one group in a given category
group = GroupResults('Bar', np.array([2.3694943596755276,
3.3716388181385781,
5.4452089176253367,
4.5704258453173559,
4.4972603724478377]),
4.05080566264, {'avg': 4.0508056626409275}, None)
obs = _ANOVA_trajectories('Foo', [group])
exp = CategoryResults('Foo', None, None,
'Only one value in the group.')
self.assert_category_results_almost_equal(obs, exp)
# One element have only one element
group2 = GroupResults('FooBar', np.array([4.05080566264]),
4.05080566264, {'avg': 4.05080566264}, None)
obs = _ANOVA_trajectories('Foo', [group, group2])
exp = CategoryResults('Foo', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
self.assert_category_results_almost_equal(obs, exp)
gr1 = GroupResults('Foo', np.array([-0.219044992, 0.079674486,
0.09233683]),
-0.015677892, {'avg': -0.015677892}, None)
gr2 = GroupResults('Bar', np.array([-0.042258081, 0.000204041,
0.024837603]),
-0.0732878716, {'avg': -0.0732878716}, None)
gr3 = GroupResults('FBF', np.array([0.080504323, -0.212014503,
-0.088353435]),
-0.0057388123, {'avg': -0.0057388123}, None)
obs = _ANOVA_trajectories('Cat', [gr1, gr2, gr3])
exp = CategoryResults('Cat', 0.8067456876, [gr1, gr2, gr3], None)
self.assert_category_results_almost_equal(obs, exp)
class GroupResultsTests(BaseTests):
def test_to_file(self):
out_paths = ['gr_wo_msg_out', 'gr_w_msg_out']
raw_paths = ['gr_wo_msg_raw', 'gr_w_msg_raw']
for gr, out_fp, raw_fp in zip(self.groups, out_paths, raw_paths):
obs_out_f = StringIO()
obs_raw_f = StringIO()
gr.to_files(obs_out_f, obs_raw_f)
obs_out = obs_out_f.getvalue()
obs_raw = obs_raw_f.getvalue()
obs_out_f.close()
obs_raw_f.close()
with open(get_data_path(out_fp), 'U') as f:
exp_out = f.read()
with open(get_data_path(raw_fp), 'U') as f:
exp_raw = f.read()
self.assertEqual(obs_out, exp_out)
self.assertEqual(obs_raw, exp_raw)
class CategoryResultsTests(BaseTests):
def test_to_file(self):
out_paths = ['cr_no_data_out', 'cr_data_out']
raw_paths = ['cr_no_data_raw', 'cr_data_raw']
for cat, out_fp, raw_fp in zip(self.categories, out_paths, raw_paths):
obs_out_f = StringIO()
obs_raw_f = StringIO()
cat.to_files(obs_out_f, obs_raw_f)
obs_out = obs_out_f.getvalue()
obs_raw = obs_raw_f.getvalue()
obs_out_f.close()
obs_raw_f.close()
with open(get_data_path(out_fp), 'U') as f:
exp_out = f.read()
with open(get_data_path(raw_fp), 'U') as f:
exp_raw = f.read()
self.assertEqual(obs_out, exp_out)
self.assertEqual(obs_raw, exp_raw)
class GradientANOVAResultsTests(BaseTests):
def test_to_file(self):
out_paths = ['vr_out']
raw_paths = ['vr_raw']
for vr, out_fp, raw_fp in zip(self.vec_results, out_paths, raw_paths):
obs_out_f = StringIO()
obs_raw_f = StringIO()
vr.to_files(obs_out_f, obs_raw_f)
obs_out = obs_out_f.getvalue()
obs_raw = obs_raw_f.getvalue()
obs_out_f.close()
obs_raw_f.close()
with open(get_data_path(out_fp), 'U') as f:
exp_out = f.read()
with open(get_data_path(raw_fp), 'U') as f:
exp_raw = f.read()
self.assertEqual(obs_out, exp_out)
self.assertEqual(obs_raw, exp_raw)
class GradientANOVATests(BaseTests):
def test_init(self):
"""Correctly initializes the class attributes"""
# Note self._groups is tested on test_make_groups
# so we are not testing it here
# Test with weighted = False
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
pdt.assert_frame_equal(bv._coords, self.coords_3axes)
exp_prop_expl = np.array([25.6216900347, 15.7715955926,
14.1215046787])
npt.assert_equal(bv._prop_expl, exp_prop_expl)
pdt.assert_frame_equal(bv._metadata_map, self.metadata_map)
self.assertTrue(bv._weighting_vector is None)
self.assertFalse(bv._weighted)
# Test with weighted = True
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
sort_category='Weight', weighted=True)
pdt.assert_frame_equal(bv._coords, self.coords_3axes)
npt.assert_equal(bv._prop_expl, exp_prop_expl)
pdt.assert_frame_equal(bv._metadata_map, self.metadata_map)
exp_weighting_vector = pd.Series(
np.array([60, 55, 50, 52, 57, 65, 68, 70, 72]),
['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',
'PC.634', 'PC.635', 'PC.636']
).astype(np.float64)
pdt.assert_series_equal(bv._weighting_vector, exp_weighting_vector)
self.assertTrue(bv._weighted)
def test_init_error(self):
"""Raises an error with erroneous inputs"""
# Raises ValueError if any category in trajectory_categories is not
# present in metadata_map
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
trajectory_categories=['foo'])
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
trajectory_categories=['Weight', 'Treatment', 'foo'])
# Raises ValueError if sort_category is not present in metadata_map
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
sort_category='foo')
# Raises ValueError if weighted == True and sort_category == None
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
weighted=True)
# Raises ValueError if weighted == True and the values under
# sort_category are not numerical
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
sort_category='Treatment', weighted=True)
# Raises ValueError if axes > len(prop_expl)
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
axes=10)
# Raises ValueError if axes < 0
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
axes=-1)
def test_normalize_samples(self):
"""Correctly normalizes the samples between coords and metadata_map"""
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368,
0.03532727349]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424]),
'PC.607': np.array([-0.105517545144, -0.41405687433,
-0.150073017617]),
'PC.634': np.array([-0.371636765565, 0.115484234741,
0.0721996475289])
}
subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')
metadata_map = {'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'PC.607': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'},
'PC.634': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '68',
'Description': 'Fasting_mouse_I.D._634'},
'PC.635': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '70',
'Description': 'Fasting_mouse_I.D._635'},
'PC.636': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '72',
'Description': 'Fasting_mouse_I.D._636'}}
subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
orient='index')
# Takes a subset from metadata_map
bv = GradientANOVA(subset_coords, self.prop_expl, self.metadata_map)
pdt.assert_frame_equal(bv._coords.sort(axis=0),
subset_coords.sort(axis=0))
pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
subset_metadata_map.sort(axis=0))
# Takes a subset from coords
bv = GradientANOVA(self.coords, self.prop_expl, subset_metadata_map)
pdt.assert_frame_equal(bv._coords.sort(axis=0),
subset_coords.sort(axis=0))
pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
subset_metadata_map.sort(axis=0))
# Takes a subset from metadata_map and coords at the same time
coord_data = {
'PC.636': np.array([-0.212230626531, 0.216034194368,
0.03532727349]),
'PC.635': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424])
}
subset_coords = pd.DataFrame.from_dict(coord_data, orient='index')
metadata_map = {'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'PC.607': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'},
'PC.634': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '68',
'Description': 'Fasting_mouse_I.D._634'}}
subset_metadata_map = pd.DataFrame.from_dict(metadata_map,
orient='index')
bv = GradientANOVA(subset_coords, self.prop_expl, subset_metadata_map)
exp_coords = pd.DataFrame.from_dict(
{'PC.355': np.array([0.236467470907, 0.21863434374,
-0.0301637746424])},
orient='index')
pdt.assert_frame_equal(bv._coords.sort(axis=0),
exp_coords.sort(axis=0))
exp_metadata_map = pd.DataFrame.from_dict(
{'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'}},
orient='index')
pdt.assert_frame_equal(bv._metadata_map.sort(axis=0),
exp_metadata_map.sort(axis=0))
def test_normalize_samples_error(self):
"""Raises an error if coords and metadata_map does not have samples in
common"""
error_metadata_map = pd.DataFrame.from_dict(
{'Foo': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'Bar': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'}},
orient='index')
with self.assertRaises(ValueError):
GradientANOVA(self.coords, self.prop_expl, error_metadata_map)
def test_make_groups(self):
"""Correctly generates the groups for trajectory_categories"""
# Test with all categories
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
'PC.481', 'PC.593'],
'Fast': ['PC.607', 'PC.634',
'PC.635', 'PC.636']},
'DOB': {'20061218': ['PC.354', 'PC.355'],
'20061126': ['PC.356'],
'20070314': ['PC.481'],
'20071210': ['PC.593'],
'20071112': ['PC.607'],
'20080116': ['PC.634', 'PC.635', 'PC.636']},
'Weight': {'60': ['PC.354'],
'55': ['PC.355'],
'50': ['PC.356'],
'52': ['PC.481'],
'57': ['PC.593'],
'65': ['PC.607'],
'68': ['PC.634'],
'70': ['PC.635'],
'72': ['PC.636']},
'Description': {'Control_mouse_I.D._354': ['PC.354'],
'Control_mouse_I.D._355': ['PC.355'],
'Control_mouse_I.D._356': ['PC.356'],
'Control_mouse_I.D._481': ['PC.481'],
'Control_mouse_I.D._593': ['PC.593'],
'Fasting_mouse_I.D._607': ['PC.607'],
'Fasting_mouse_I.D._634': ['PC.634'],
'Fasting_mouse_I.D._635': ['PC.635'],
'Fasting_mouse_I.D._636': ['PC.636']}}
self.assertEqual(bv._groups, exp_groups)
# Test with user-defined categories
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map,
trajectory_categories=['Treatment', 'DOB'])
exp_groups = {'Treatment': {'Control': ['PC.354', 'PC.355', 'PC.356',
'PC.481', 'PC.593'],
'Fast': ['PC.607', 'PC.634',
'PC.635', 'PC.636']},
'DOB': {'20061218': ['PC.354', 'PC.355'],
'20061126': ['PC.356'],
'20070314': ['PC.481'],
'20071210': ['PC.593'],
'20071112': ['PC.607'],
'20080116': ['PC.634', 'PC.635', 'PC.636']}}
self.assertEqual(bv._groups, exp_groups)
def test_make_groups_natural_sorting(self):
# Ensure sample IDs are sorted using a natural sorting algorithm.
df = pd.DataFrame.from_dict({
'a2': {'Col1': 'foo', 'Col2': '1.0'},
'a1': {'Col1': 'bar', 'Col2': '-42.0'},
'a11.0': {'Col1': 'foo', 'Col2': '2e-5'},
'a-10': {'Col1': 'foo', 'Col2': '5'},
'a10': {'Col1': 'bar', 'Col2': '5'}},
orient='index')
coords = pd.DataFrame.from_dict({
'a10': np.array([-0.212230626531, 0.216034194368, 0.03532727349]),
'a11.0': np.array([-0.277487312135, -0.0295483215975,
-0.0744173437992]),
'a1': np.array([0.220886492631, 0.0874848360559,
-0.351990132198]),
'a2': np.array([0.0308923744062, -0.0446295973489,
0.133996451689]),
'a-10': np.array([0.27616778138, -0.0341866951102,
0.0633000238256])},
orient='index')
prop_expl = np.array([25.6216900347, 15.7715955926, 14.1215046787,
11.6913885817, 9.83044890697])
# Sort by sample IDs.
ga = GradientANOVA(coords, prop_expl, df)
exp_groups = {
'Col1': {
'foo': ['a-10', 'a2', 'a11.0'],
'bar': ['a1', 'a10']
},
'Col2': {
'1.0': ['a2'],
'-42.0': ['a1'],
'2e-5': ['a11.0'],
'5': ['a-10', 'a10']
}
}
self.assertEqual(ga._groups, exp_groups)
# Sort sample IDs by Col2.
ga = GradientANOVA(coords, prop_expl, df,
trajectory_categories=['Col1'],
sort_category='Col2')
exp_groups = {
'Col1': {
'foo': ['a11.0', 'a2', 'a-10'],
'bar': ['a1', 'a10']
}
}
self.assertEqual(ga._groups, exp_groups)
def test_get_trajectories(self):
"""Should raise a NotImplementedError as this is a base class"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(NotImplementedError):
bv.get_trajectories()
def test_get_group_trajectories(self):
"""Should raise a NotImplementedError in usual execution as this is
a base class"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(NotImplementedError):
bv.get_trajectories()
def test_get_group_trajectories_error(self):
"""Should raise a RuntimeError if the user call _get_group_trajectories
with erroneous inputs"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(RuntimeError):
bv._get_group_trajectories("foo", ['foo'])
with self.assertRaises(RuntimeError):
bv._get_group_trajectories("bar", [])
def test_compute_trajectories_results(self):
"""Should raise a NotImplementedError as this is a base class"""
bv = GradientANOVA(self.coords, self.prop_expl, self.metadata_map)
with self.assertRaises(NotImplementedError):
bv._compute_trajectories_results("foo", [])
class AverageGradientANOVATests(BaseTests):
def test_get_trajectories_all(self):
"""get_trajectories returns the results of all categories"""
av = AverageGradientANOVA(self.coords, self.prop_expl,
self.metadata_map)
obs = av.get_trajectories()
exp_description = CategoryResults('Description', None, None,
'This group can not be used. All '
'groups should have more than 1 '
'element.')
exp_weight = CategoryResults('Weight', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
exp_dob = CategoryResults('DOB', None, None,
'This group can not be used. All groups '
'should have more than 1 element.')
exp_control_group = GroupResults('Control',
np.array([2.3694943596755276,
3.3716388181385781,
5.4452089176253367,
4.5704258453173559,
4.4972603724478377]),
4.05080566264,
{'avg': 4.0508056626409275}, None)
exp_fast_group = GroupResults('Fast', np.array([7.2220488239279126,
4.2726021564374372,
1.1169097274372082,
4.02717600030876]),
4.15968417703,
{'avg': 4.1596841770278292}, None)
exp_treatment = CategoryResults('Treatment', 0.93311555,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('avg', False, [exp_description, exp_weight,
exp_dob, exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_single(self):
"""get_trajectories returns the results of the provided category"""
av = AverageGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'])
obs = av.get_trajectories()
exp_control_group = GroupResults('Control',
np.array([2.3694943596755276,
3.3716388181385781,
5.4452089176253367,
4.5704258453173559,
4.4972603724478377]),
4.05080566264,
{'avg': 4.0508056626409275}, None)
exp_fast_group = GroupResults('Fast', np.array([7.2220488239279126,
4.2726021564374372,
1.1169097274372082,
4.02717600030876]),
4.15968417703,
{'avg': 4.1596841770278292}, None)
exp_treatment = CategoryResults('Treatment', 0.93311555,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('avg', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
"""get_trajectories returns the correct weighted results"""
av = AverageGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight', weighted=True)
obs = av.get_trajectories()
exp_control_group = GroupResults('Control', np.array([5.7926887872,
4.3242308936,
2.9212403501,
5.5400792151,
1.2326804315]),
3.9621839355,
{'avg': 3.9621839355}, None)
exp_fast_group = GroupResults('Fast', np.array([7.2187223286,
2.5522161282,
2.2349795861,
4.5278215248]),
4.1334348919,
{'avg': 4.1334348919}, None)
exp_treatment = CategoryResults('Treatment', 0.9057666800,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('avg', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
class TrajectoryGradientANOVATests(BaseTests):
def test_get_trajectories(self):
tv = TrajectoryGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight')
obs = tv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([8.6681963576,
7.0962717982,
7.1036434615,
4.0675712674]),
6.73392072123,
{'2-norm': 13.874494152}, None)
exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
3.9163741156,
4.4943507388]),
6.5466301150,
{'2-norm': 12.713431181}, None)
exp_treatment = CategoryResults('Treatment', 0.9374500147,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('trajectory', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
tv = TrajectoryGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight', weighted=True)
obs = tv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([8.9850643421,
6.1617529749,
7.7989125908,
4.9666249268]),
6.9780887086,
{'2-norm': 14.2894710091}, None)
exp_fast_group = GroupResults('Fast', np.array([9.6823682852,
2.9511115209,
5.2434091953]),
5.9589630005,
{'2-norm': 11.3995901159}, None)
exp_treatment = CategoryResults('Treatment', 0.6248157720,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('trajectory', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
class FirstDifferenceGradientANOVATests(BaseTests):
def test_get_trajectories(self):
dv = FirstDifferenceGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight')
obs = dv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-1.5719245594,
0.0073716633,
-3.0360721941]),
-1.5335416967,
{'mean': -1.5335416967,
'std': 1.2427771485}, None)
exp_fast_group = GroupResults('Fast', np.array([-7.3127913749,
0.5779766231]),
-3.3674073758,
{'mean': -3.3674073758,
'std': 3.9453839990}, None)
exp_treatment = CategoryResults('Treatment', 0.6015260608,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('diff', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
dv = FirstDifferenceGradientANOVA(self.coords, self.prop_expl,
self.metadata_map,
trajectory_categories=['Treatment'],
sort_category='Weight',
weighted=True)
obs = dv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-2.8233113671,
1.6371596158,
-2.8322876639]),
-1.3394798050,
{'mean': -1.3394798050,
'std': 2.1048051097}, None)
exp_fast_group = GroupResults('Fast', np.array([-6.7312567642,
2.2922976743]),
-2.2194795449,
{'mean': -2.2194795449,
'std': 4.5117772193}, None)
exp_treatment = CategoryResults('Treatment', 0.8348644420,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('diff', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
class WindowDifferenceGradientANOVATests(BaseTests):
def test_get_trajectories(self):
wdv = WindowDifferenceGradientANOVA(
self.coords, self.prop_expl, self.metadata_map, 3,
trajectory_categories=['Treatment'], sort_category='Weight')
obs = wdv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-2.5790341819,
-2.0166764661,
-3.0360721941,
0.]),
-1.9079457105,
{'mean': -1.9079457105,
'std': 1.1592139913}, None)
exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
3.9163741156,
4.4943507388]),
6.5466301150,
{'mean': 6.5466301150,
'std': 3.3194494926},
"Cannot calculate the first difference "
"with a window of size (3).")
exp_treatment = CategoryResults('Treatment', 0.0103976830,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('wdiff', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_get_trajectories_weighted(self):
wdv = WindowDifferenceGradientANOVA(
self.coords, self.prop_expl, self.metadata_map, 3,
trajectory_categories=['Treatment'], sort_category='Weight',
weighted=True)
obs = wdv.get_trajectories()
exp_control_group = GroupResults('Control', np.array([-2.6759675112,
-0.2510321601,
-2.8322876639,
0.]),
-1.4398218338,
{'mean': -1.4398218338,
'std': 1.31845790844}, None)
exp_fast_group = GroupResults('Fast', np.array([9.6823682852,
2.9511115209,
5.2434091953]),
5.9589630005,
{'mean': 5.9589630005,
'std': 2.7942163293},
"Cannot calculate the first difference "
"with a window of size (3).")
exp_treatment = CategoryResults('Treatment', 0.0110675605,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('wdiff', True, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
darioizzo/pagmo | PyGMO/problem/_tsp.py | 7 | 16114 | from PyGMO.problem._problem import tsp, tsp_cs, tsp_vrplc, _tsp_encoding
from PyGMO import __extensions__
if __extensions__["gtop"] is True:
from PyGMO.problem._problem_space import tsp_ds
tsp_ds.encoding_type = _tsp_encoding
# Renaming and placing the enums
tsp.encoding_type = _tsp_encoding
tsp_vrplc.encoding_type = _tsp_encoding
tsp_cs.encoding_type = _tsp_encoding
def _tsp_ctor(self, weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]], type="cities"):
"""
Constructs a Travelling Salesman Problem (TSP or ATSP)
The problem encoding can be of three different types as
selected by the type kwarg
1-"cities"
This encoding represents the ids of the cities visited
directly in the chromosome. It will
thus create a constrained problem, as only permutations of the
city ids are valid (e.g. [0,2,1,5,0] is not
a valid chromosome)
2-"randomkeys"
This encoding was first introduced in:
Bean, J. C. (1994). Genetic algorithms and random keys for
sequencing and optimization. ORSA Journal on Computing, 6(2), 154-160.
It creates a purely box-constrained problem, with no additional constraints.
It essentially represents the tour as a sequence of doubles bounded
in [0,1]. The tour is reconstructed by the argsort of the sequence.
(e.g. [0.34,0.12,0.76,0.03] -> [3,1,0,2])
3-"full"
In the full encoding the TSP is represented as an integer linear
programming problem. The details can be found in
http://en.wikipedia.org/wiki/Travelling_salesman_problem
Constructs a Travelling Salesman problem
(Constrained Integer Single-Objective)
USAGE: problem.tsp(weights = [[0,1,2],[1,0,5],[2,5,0]], type="randomkeys")
* weights: Square matrix with zero diagonal entries containing the city distances.
* type: encoding type. One of "cities","randomkeys","full"
"""
# We construct the arg list for the original constructor exposed by
# boost_python
from PyGMO.problem._problem import _tsp_encoding
def encoding_type(x):
return {
"cities": _tsp_encoding.CITIES,
"randomkeys": _tsp_encoding.RANDOMKEYS,
"full": _tsp_encoding.FULL
}[x]
arg_list = []
arg_list.append(weights)
arg_list.append(encoding_type(type))
self._orig_init(*arg_list)
tsp._orig_init = tsp.__init__
tsp.__init__ = _tsp_ctor
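# --- Editor's aside: a hedged illustration (not part of the original PyGMO
# module) of the "randomkeys" decoding described in the docstring above --
# the tour is simply the argsort of the chromosome of keys. The chromosome
# used here is the docstring's own example.
from numpy import argsort as _randomkeys_argsort
assert list(_randomkeys_argsort([0.34, 0.12, 0.76, 0.03])) == [3, 1, 0, 2]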
def _tsp_cs_ctor(self, weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]], values=[1, 1, 1], max_path_length=2, type="cities"):
"""
Constructs a Travelling Salesman Problem with city selection (TSP-CS)
The problem encoding can be of three different types as
selected by the type kwarg
1-"cities"
This encoding represents the ids of the cities visited
directly in the chromosome. It will
thus create a constrained problem, as only permutations of the
city ids are valid (e.g. [0,2,1,5,0] is not
a valid chromosome)
2-"randomkeys"
This encoding was first introduced in:
Bean, J. C. (1994). Genetic algorithms and random keys for
sequencing and optimization. ORSA Journal on Computing, 6(2), 154-160.
It creates a purely box-constrained problem, with no additional constraints.
It essentially represents the tour as a sequence of doubles bounded
in [0,1]. The tour is reconstructed by the argsort of the sequence.
(e.g. [0.34,0.12,0.76,0.03] -> [3,1,0,2])
3-"full"
In the full encoding the TSP is represented as a integer linear
programming problem. The details can be found in
http://en.wikipedia.org/wiki/Travelling_salesman_problem
Constructs a Travelling Salesman problem
(Constrained Integer Single-Objective)
USAGE: problem.tsp_cs(weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]], values=[1, 1, 1], max_path_length=2, type="cities")
* weights: Square matrix with zero diagonal entries containing the cities distances.
* values: The city values.
* max_path_length: maximum length the salesman can walk
* type: encoding type. One of "cities","randomkeys","full"
"""
# We construct the arg list for the original constructor exposed by
# boost_python
from PyGMO.problem._problem import _tsp_encoding
def encoding_type(x):
return {
"cities": _tsp_encoding.CITIES,
"randomkeys": _tsp_encoding.RANDOMKEYS,
"full": _tsp_encoding.FULL
}[x]
arg_list = []
arg_list.append(weights)
arg_list.append(values)
arg_list.append(max_path_length)
arg_list.append(encoding_type(type))
self._orig_init(*arg_list)
tsp_cs._orig_init = tsp_cs.__init__
tsp_cs.__init__ = _tsp_cs_ctor
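# Illustrative usage sketch for the city-selection variant (again assuming
# PyGMO is importable): every city carries a value and the salesman may only
# cover max_path_length in total.
#
#   from PyGMO import problem
#   p_cs = problem.tsp_cs(weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]],
#                         values=[1.0, 1.0, 1.0], max_path_length=2.0,
#                         type="randomkeys")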
def _tsp_vrplc_ctor(self, weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]], type="cities", capacity=1.1):
"""
    Constructs a Vehicle Routing Problem with Limited Capacity (VRPLC).
    This is a variant of the TSP that asks to find n tours, each of length
    smaller than the maximum vehicle capacity, that together visit all cities.
    The objective is to minimize n.
The problem encoding can be of three different types as
selected by the type kwarg
1-"cities"
This encoding represents the ids of the cities visited
directly in the chromosome. It will
thus create a constrained problem as only permutation of the
cities ids are valid (e.g. [0,2,1,5,0] is not
a valid chromosome)
2-"randomkeys"
This encoding, first introduced in the paper
Bean, J. C. (1994). Genetic algorithms and random keys for
sequencing and optimization. ORSA journal on computing, 6(2), 154-160.
It creates a box constrained problem without any constraint.
It essentially represents the tour as a sequence of doubles bounded
in [0,1]. The tour is reconstructed by the argsort of the sequence.
(e.g. [0.34,0.12,0.76,0.03] -> [3,1,0,2])
3-"full"
In the full encoding the TSP is represented as a integer linear
programming problem. The details can be found in
http://en.wikipedia.org/wiki/Travelling_salesman_problem
Constructs a Travelling Salesman problem
(Constrained Integer Single-Objective)
    USAGE: problem.tsp_vrplc(weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]], type="randomkeys", capacity=1.1)
* weights: Square matrix with zero diagonal entries containing the cities distances.
* type: encoding type. One of "cities","randomkeys","full"
* capacity: maximum vehicle capacity
"""
from PyGMO.problem._problem import _tsp_encoding
def encoding_type(x):
return {
"cities": _tsp_encoding.CITIES,
"randomkeys": _tsp_encoding.RANDOMKEYS,
"full": _tsp_encoding.FULL
}[x]
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
arg_list.append(weights)
arg_list.append(encoding_type(type))
arg_list.append(capacity)
self._orig_init(*arg_list)
tsp_vrplc._orig_init = tsp_vrplc.__init__
tsp_vrplc.__init__ = _tsp_vrplc_ctor
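# Illustrative usage sketch (assuming PyGMO is importable): with capacity=1.1
# every single tour must be shorter than 1.1, and the optimiser minimises the
# number of tours needed to visit all cities.
#
#   from PyGMO import problem
#   p_vrp = problem.tsp_vrplc(weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]],
#                             type="cities", capacity=1.1)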
def _plot_tsp(self, x, node_size=10, edge_color='r',
edge_width=1, bias=None, node_color=None, pos=None):
"""
Plots a tour represented in the chromosome x
(using the same encoding of the self object)
    USAGE: problem.plot(x, node_size=10, edge_color='r',
           edge_width=1, bias=None, node_color=None, pos=None)
    * x: chromosome encoding the city tour.
         The encoding type used must be the same as that of self
    * node_size: size of the nodes in the graph visualization
    * edge_color: color of the edges in the graph visualization
    * edge_width: width of the edges in the graph visualization
    * bias: when the graph node positions are not given, the plot
         uses a spring model to place the nodes; the spring
         constants depend on this bias parameter
    * node_color: color of the nodes in the graph visualization
    * pos: a dictionary containing the node positions
         (same format as networkx)
"""
if not (self.verify_x(x) and self.feasibility_x(x)):
raise Exception("crhomosome is unfeasible")
from matplotlib import pyplot as plt
import networkx as nx
import numpy as np
from PyGMO.problem import tsp
fig = plt.gcf()
axis = plt.gca()
    # We extract some information about the problem
weights = self.weights
n_cities = len(weights[0])
if self.encoding == _tsp_encoding.RANDOMKEYS:
edgelist = self.randomkeys2cities(x)
elif self.encoding == _tsp_encoding.CITIES:
edgelist = x
elif self.encoding == _tsp_encoding.FULL:
edgelist = self.full2cities(x)
    # We construct the list of edges (u, v) containing the indices of the
    # cities visited, distinguishing here between the different TSP variants
if type(self) == tsp:
edgelist = [(edgelist[i], edgelist[i + 1]) for i in range(n_cities - 1)] + [(edgelist[-1], edgelist[0])]
elif type(self) == tsp_cs:
_, _, id1, id2 = self.find_city_subsequence(x)
if id1 <= id2:
edgelist = edgelist[id1:(id2 + 1) % n_cities]
else:
edgelist = edgelist[id1:] + edgelist[:id2 + 1]
edgelist = [(edgelist[i], edgelist[i + 1]) for i in range(len(edgelist) - 1)]
elif type(self) == tsp_vrplc:
stl = 0
chromosome = edgelist
edgelist = [(chromosome[0], chromosome[1])]
for i in range(1, n_cities - 1):
stl += weights[int(chromosome[i])][int(chromosome[i + 1])]
if stl > self.capacity:
stl = 0
else:
edgelist += [(chromosome[i], chromosome[i + 1])]
if bias is None:
bias = max([max(d) for d in weights])
# We create a networkx graph
G = nx.Graph()
# We fill in the vertices
for i in range(n_cities):
G.add_node(i)
# We fill in all the edges
for i in range(n_cities):
for j in range(n_cities):
if i <= j:
continue
G.add_edge(i, j, weight=bias / weights[i][j])
    # If the city coordinates are not passed as an input we try to compute
    # the coordinates for a Euclidean TSP (assuming symmetric weights)
if pos is None:
        # assign the first two nodes: node 0 and node 1; node 0 is
        # chosen to be at the origin
pos = {0: np.array([0, 0]), 1: np.array([weights[0][1], 0])}
        # while computing the coordinates, the algorithm checks whether
        # the problem is Euclidean
prob_is_eucl = True
# we will have to store the first node that is not located in the
# line constructed by the initial two nodes 0 and 1
nil_idx = -1
i = 2
while (i < n_cities and prob_is_eucl is True):
# we compute cos(alpha) where alpha is the angle enclosed
# by the edge (0,1) and (0,i)
cos_alpha = 0.5 * ((weights[0][i]) ** 2 + (weights[0][1]) ** 2 -
(weights[1][i]) ** 2) / (weights[0][i] * weights[0][1])
if (cos_alpha < -1 or 1 < cos_alpha):
prob_is_eucl = False
else:
# computes one of the two possible positions for node i
pos[i] = np.array([weights[0][i] * cos_alpha,
weights[0][i] * (1 - cos_alpha ** 2) ** (0.5)])
omega = 1
if abs(cos_alpha) != 1:
                    # as soon as one node is not aligned with edge (0,1)
                    # we have to orient the plot: the first node that is not
                    # aligned, named nil_idx, is chosen to have a positive
                    # second coordinate - every following node is then
                    # oriented accordingly
if nil_idx == -1:
nil_idx = i
elif abs(((pos[i][0] - pos[nil_idx][0]) ** 2 +
(pos[i][1] - pos[nil_idx][1]) ** 2) ** (0.5) -
weights[i][nil_idx]) > 1e-08 * weights[i][nil_idx]:
omega = -1
pos[i][1] = omega * pos[i][1] # orient node
                # We have to check the distance to all the previous
                # nodes to decide whether the problem is Euclidean
for j in range(2, i):
if abs(((pos[i][0] - pos[j][0]) ** 2 +
(pos[i][1] - pos[j][1]) ** 2) ** (0.5) -
weights[i][j]) > 1e-08 * weights[i][j]:
prob_is_eucl = False
i += 1
    # In case of a non-Euclidean TSP we create a spring model
if prob_is_eucl is False:
pos = nx.layout.spring_layout(G)
if node_color is None:
node_color = [0.4] * n_cities
nx.draw_networkx_nodes(G, pos=pos, node_size=node_size,
cmap=plt.cm.Blues, node_color=node_color, ax=axis)
nx.draw_networkx_edges(G, pos, edgelist=edgelist,
width=edge_width, alpha=1, edge_color=edge_color, ax=axis)
fig.canvas.draw()
plt.show()
return pos
tsp.plot = _plot_tsp
tsp_cs.plot = _plot_tsp
tsp_vrplc.plot = _plot_tsp
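# Illustrative plotting sketch (assumes networkx and matplotlib are installed,
# as imported inside _plot_tsp). The chromosome passed to plot() must use the
# same encoding as the problem instance; for a 3-city "cities" encoding a
# plain permutation of the city ids is a feasible chromosome.
#
#   from PyGMO import problem
#   p = problem.tsp(weights=[[0, 1, 2], [1, 0, 5], [2, 5, 0]], type="cities")
#   node_positions = p.plot([0, 1, 2], node_size=40, edge_color='b')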
if __extensions__["gtop"] is True:
def _tsp_ds_ctor(self, planets, values, max_DV, epochs, type="cities"):
"""
        Constructs Travelling Salesman Problem Debris Selection (TSP-DS)
The problem encoding can be of three different types as
selected by the type kwarg
1-"cities"
This encoding represents the ids of the cities visited
directly in the chromosome. It will
thus create a constrained problem as only permutation of the
cities ids are valid (e.g. [0,2,1,5,0] is not
a valid chromosome)
2-"randomkeys"
This encoding, first introduced in the paper
Bean, J. C. (1994). Genetic algorithms and random keys for
sequencing and optimization. ORSA journal on computing, 6(2), 154-160.
It creates a box constrained problem without any constraint.
It essentially represents the tour as a sequence of doubles bounded
in [0,1]. The tour is reconstructed by the argsort of the sequence.
(e.g. [0.34,0.12,0.76,0.03] -> [3,1,0,2])
3-"full"
In the full encoding the TSP is represented as a integer linear
programming problem. The details can be found in
http://en.wikipedia.org/wiki/Travelling_salesman_problem
Constructs a Travelling Salesman problem
(Constrained Integer Single-Objective)
        USAGE: problem.tsp_ds(planets, values, max_DV, epochs, type="cities")
* planets: list of planets
* values: list of planets values
* max_DV: maximum DV on-board
* epochs: list of allowed epochs for the visit (in MJD2000)
* type: encoding type. One of "cities","randomkeys","full"
"""
# We construct the arg list for the original constructor exposed by
# boost_python
from PyGMO.problem._problem import _tsp_encoding
def encoding_type(x):
return {
"cities": _tsp_encoding.CITIES,
"randomkeys": _tsp_encoding.RANDOMKEYS,
"full": _tsp_encoding.FULL
}[x]
arg_list = []
arg_list.append(planets)
arg_list.append(values)
arg_list.append(max_DV)
arg_list.append(epochs)
arg_list.append(encoding_type(type))
self._orig_init(*arg_list)
tsp_ds._orig_init = tsp_ds.__init__
tsp_ds.__init__ = _tsp_ds_ctor
| gpl-3.0 |
gjermv/potato | sccs/gpx/addMissingData.py | 1 | 3208 | '''
Created on 22 Feb 2016
@author: gjermund.vingerhagen
'''
from lxml import etree as etree
from gpx import utmconverter as utm
from gpx import algos as algos
from gpx import gpxtricks as gpxtricks
from gpx import dtmdata as dtm
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta as dtt
from matplotlib import pyplot as plt
import time
import googlemaps
import glob
import datetime
def calcTime(startTime,avgSpeed,dist):
t = datetime.datetime.strptime(startTime, "%Y-%m-%dT%H:%M:%SZ")
sec = dist/avgSpeed
t2 = t + datetime.timedelta(seconds=sec)
print(t2.strftime("%Y-%m-%dT%H:%M:%SZ"))
return t2.strftime("%Y-%m-%dT%H:%M:%SZ")
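# Worked example for calcTime, kept as a comment so importing this module has
# no side effects: covering 1000 m at an average speed of 5 m/s takes
# 1000 / 5 = 200 s, i.e. 3 min 20 s, so
#
#   calcTime("2016-01-01T12:00:00Z", 5, 1000)   # -> "2016-01-01T12:03:20Z"
#
# The result is only meaningful when dist and avgSpeed use consistent units
# (e.g. metres and metres per second).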
def GPXaddTimes(filename,startTime,avgSpeed=5):
""" Reads a gpx file ' """
f = open(filename,encoding='utf-8')
ns = gpxtricks.findNamespace(f)
xml = etree.parse(f)
f.close()
trks = xml.iterfind(ns+'trk')
tmp_lat = 0
tmp_lon = 0
dist = 0
for trk in trks:
trksegs = trk.iterfind(ns+'trkseg')
for trkseg in trksegs:
points = trkseg.iterfind(ns+'trkpt')
for i,point in enumerate(points):
if i == 0:
tmp_lat = float(point.attrib['lat'])
tmp_lon = float(point.attrib['lon'])
txt = etree.SubElement(point, "time")
txt.text = startTime
else:
lat = float(point.attrib['lat'])
lon = float(point.attrib['lon'])
length = round(utm.haversine(tmp_lon,tmp_lat,lon,lat),5)
dist += length
txt = etree.SubElement(point, "time")
txt.text = calcTime(startTime,avgSpeed,dist)
tmp_lat,tmp_lon = lat,lon
print(etree.tostring(xml,pretty_print= True))
f = open('C:\\python\\testdata\\outgpx.gpx','wb')
f.write(etree.tostring(xml,pretty_print= True))
f.close()
def GPXaddElevation(filename):
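    """ Reads a GPX file and adds an <ele> tag to every track point, with the
    elevation looked up via the local dtm helper (dtm.calculateEle). Like
    GPXaddTimes above, the result is written to a hard-coded output path. """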
f = open(filename,encoding='utf-8')
ns = gpxtricks.findNamespace(f)
xml = etree.parse(f)
f.close()
trks = xml.iterfind(ns+'trk')
tmp_lat = 0
tmp_lon = 0
dist = 0
for trk in trks:
trksegs = trk.iterfind(ns+'trkseg')
for trkseg in trksegs:
points = trkseg.iterfind(ns+'trkpt')
for i,point in enumerate(points):
lat = float(point.attrib['lat'])
lon = float(point.attrib['lon'])
ele = etree.SubElement(point, "ele")
ele.text = str(round(dtm.calculateEle(lat, lon,coordsys='latlon'),1))
print(str(ele.text))
print(etree.tostring(xml,pretty_print= True))
f = open('C:\\python\\testdata\\outgpx.gpx','wb')
f.write(etree.tostring(xml,pretty_print= True))
f.close()
#GPXaddTimes('C:\\python\\testdata\\apecs\\2016-04-24_Coton.gpx','2016-04-24T09:11:04Z',2.91054113)
GPXaddElevation('C:\\Users\\gjermund.vingerhagen\\Downloads\\0718061706-52130.gpx') | gpl-2.0 |